1 /* 2 * linux/net/iucv/af_iucv.c 3 * 4 * IUCV protocol stack for Linux on zSeries 5 * 6 * Copyright 2006 IBM Corporation 7 * 8 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> 9 */ 10 11 #include <linux/module.h> 12 #include <linux/types.h> 13 #include <linux/list.h> 14 #include <linux/errno.h> 15 #include <linux/kernel.h> 16 #include <linux/sched.h> 17 #include <linux/slab.h> 18 #include <linux/skbuff.h> 19 #include <linux/init.h> 20 #include <linux/poll.h> 21 #include <net/sock.h> 22 #include <asm/ebcdic.h> 23 #include <asm/cpcmd.h> 24 #include <linux/kmod.h> 25 26 #include <net/iucv/iucv.h> 27 #include <net/iucv/af_iucv.h> 28 29 #define CONFIG_IUCV_SOCK_DEBUG 1 30 31 #define IPRMDATA 0x80 32 #define VERSION "1.0" 33 34 static char iucv_userid[80]; 35 36 static struct proto_ops iucv_sock_ops; 37 38 static struct proto iucv_proto = { 39 .name = "AF_IUCV", 40 .owner = THIS_MODULE, 41 .obj_size = sizeof(struct iucv_sock), 42 }; 43 44 static void iucv_sock_kill(struct sock *sk); 45 static void iucv_sock_close(struct sock *sk); 46 47 /* Call Back functions */ 48 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); 49 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); 50 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]); 51 static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], 52 u8 ipuser[16]); 53 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); 54 55 static struct iucv_sock_list iucv_sk_list = { 56 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), 57 .autobind_name = ATOMIC_INIT(0) 58 }; 59 60 static struct iucv_handler af_iucv_handler = { 61 .path_pending = iucv_callback_connreq, 62 .path_complete = iucv_callback_connack, 63 .path_severed = iucv_callback_connrej, 64 .message_pending = iucv_callback_rx, 65 .message_complete = iucv_callback_txdone 66 }; 67 68 static inline void high_nmcpy(unsigned char *dst, char *src) 69 { 70 memcpy(dst, src, 8); 71 } 72 73 static inline void low_nmcpy(unsigned char *dst, char *src) 74 { 75 memcpy(&dst[8], src, 8); 76 } 77 78 /* Timers */ 79 static void iucv_sock_timeout(unsigned long arg) 80 { 81 struct sock *sk = (struct sock *)arg; 82 83 bh_lock_sock(sk); 84 sk->sk_err = ETIMEDOUT; 85 sk->sk_state_change(sk); 86 bh_unlock_sock(sk); 87 88 iucv_sock_kill(sk); 89 sock_put(sk); 90 } 91 92 static void iucv_sock_clear_timer(struct sock *sk) 93 { 94 sk_stop_timer(sk, &sk->sk_timer); 95 } 96 97 static struct sock *__iucv_get_sock_by_name(char *nm) 98 { 99 struct sock *sk; 100 struct hlist_node *node; 101 102 sk_for_each(sk, node, &iucv_sk_list.head) 103 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) 104 return sk; 105 106 return NULL; 107 } 108 109 static void iucv_sock_destruct(struct sock *sk) 110 { 111 skb_queue_purge(&sk->sk_receive_queue); 112 skb_queue_purge(&sk->sk_write_queue); 113 } 114 115 /* Cleanup Listen */ 116 static void iucv_sock_cleanup_listen(struct sock *parent) 117 { 118 struct sock *sk; 119 120 /* Close non-accepted connections */ 121 while ((sk = iucv_accept_dequeue(parent, NULL))) { 122 iucv_sock_close(sk); 123 iucv_sock_kill(sk); 124 } 125 126 parent->sk_state = IUCV_CLOSED; 127 sock_set_flag(parent, SOCK_ZAPPED); 128 } 129 130 /* Kill socket */ 131 static void iucv_sock_kill(struct sock *sk) 132 { 133 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 134 return; 135 136 iucv_sock_unlink(&iucv_sk_list, sk); 137 sock_set_flag(sk, SOCK_DEAD); 138 sock_put(sk); 139 } 140 141 /* Close an IUCV socket */ 142 static void iucv_sock_close(struct sock *sk) 143 { 144 unsigned char user_data[16]; 145 struct iucv_sock *iucv = iucv_sk(sk); 146 int err; 147 unsigned long timeo; 148 149 iucv_sock_clear_timer(sk); 150 lock_sock(sk); 151 152 switch (sk->sk_state) { 153 case IUCV_LISTEN: 154 iucv_sock_cleanup_listen(sk); 155 break; 156 157 case IUCV_CONNECTED: 158 case IUCV_DISCONN: 159 err = 0; 160 161 sk->sk_state = IUCV_CLOSING; 162 sk->sk_state_change(sk); 163 164 if (!skb_queue_empty(&iucv->send_skb_q)) { 165 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 166 timeo = sk->sk_lingertime; 167 else 168 timeo = IUCV_DISCONN_TIMEOUT; 169 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo); 170 } 171 172 sk->sk_state = IUCV_CLOSED; 173 sk->sk_state_change(sk); 174 175 if (iucv->path) { 176 low_nmcpy(user_data, iucv->src_name); 177 high_nmcpy(user_data, iucv->dst_name); 178 ASCEBC(user_data, sizeof(user_data)); 179 err = iucv_path_sever(iucv->path, user_data); 180 iucv_path_free(iucv->path); 181 iucv->path = NULL; 182 } 183 184 sk->sk_err = ECONNRESET; 185 sk->sk_state_change(sk); 186 187 skb_queue_purge(&iucv->send_skb_q); 188 skb_queue_purge(&iucv->backlog_skb_q); 189 190 sock_set_flag(sk, SOCK_ZAPPED); 191 break; 192 193 default: 194 sock_set_flag(sk, SOCK_ZAPPED); 195 break; 196 } 197 198 release_sock(sk); 199 iucv_sock_kill(sk); 200 } 201 202 static void iucv_sock_init(struct sock *sk, struct sock *parent) 203 { 204 if (parent) 205 sk->sk_type = parent->sk_type; 206 } 207 208 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) 209 { 210 struct sock *sk; 211 212 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); 213 if (!sk) 214 return NULL; 215 216 sock_init_data(sock, sk); 217 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); 218 spin_lock_init(&iucv_sk(sk)->accept_q_lock); 219 skb_queue_head_init(&iucv_sk(sk)->send_skb_q); 220 INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); 221 spin_lock_init(&iucv_sk(sk)->message_q.lock); 222 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 223 iucv_sk(sk)->send_tag = 0; 224 225 sk->sk_destruct = iucv_sock_destruct; 226 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 227 sk->sk_allocation = GFP_DMA; 228 229 sock_reset_flag(sk, SOCK_ZAPPED); 230 231 sk->sk_protocol = proto; 232 sk->sk_state = IUCV_OPEN; 233 234 setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk); 235 236 iucv_sock_link(&iucv_sk_list, sk); 237 return sk; 238 } 239 240 /* Create an IUCV socket */ 241 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol) 242 { 243 struct sock *sk; 244 245 if (sock->type != SOCK_STREAM) 246 return -ESOCKTNOSUPPORT; 247 248 sock->state = SS_UNCONNECTED; 249 sock->ops = &iucv_sock_ops; 250 251 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); 252 if (!sk) 253 return -ENOMEM; 254 255 iucv_sock_init(sk, NULL); 256 257 return 0; 258 } 259 260 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) 261 { 262 write_lock_bh(&l->lock); 263 sk_add_node(sk, &l->head); 264 write_unlock_bh(&l->lock); 265 } 266 267 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) 268 { 269 write_lock_bh(&l->lock); 270 sk_del_node_init(sk); 271 write_unlock_bh(&l->lock); 272 } 273 274 void iucv_accept_enqueue(struct sock *parent, struct sock *sk) 275 { 276 unsigned long flags; 277 struct iucv_sock *par = iucv_sk(parent); 278 279 sock_hold(sk); 280 spin_lock_irqsave(&par->accept_q_lock, flags); 281 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); 282 spin_unlock_irqrestore(&par->accept_q_lock, flags); 283 iucv_sk(sk)->parent = parent; 284 parent->sk_ack_backlog++; 285 } 286 287 void iucv_accept_unlink(struct sock *sk) 288 { 289 unsigned long flags; 290 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); 291 292 spin_lock_irqsave(&par->accept_q_lock, flags); 293 list_del_init(&iucv_sk(sk)->accept_q); 294 spin_unlock_irqrestore(&par->accept_q_lock, flags); 295 iucv_sk(sk)->parent->sk_ack_backlog--; 296 iucv_sk(sk)->parent = NULL; 297 sock_put(sk); 298 } 299 300 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) 301 { 302 struct iucv_sock *isk, *n; 303 struct sock *sk; 304 305 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { 306 sk = (struct sock *) isk; 307 lock_sock(sk); 308 309 if (sk->sk_state == IUCV_CLOSED) { 310 iucv_accept_unlink(sk); 311 release_sock(sk); 312 continue; 313 } 314 315 if (sk->sk_state == IUCV_CONNECTED || 316 sk->sk_state == IUCV_SEVERED || 317 !newsock) { 318 iucv_accept_unlink(sk); 319 if (newsock) 320 sock_graft(sk, newsock); 321 322 if (sk->sk_state == IUCV_SEVERED) 323 sk->sk_state = IUCV_DISCONN; 324 325 release_sock(sk); 326 return sk; 327 } 328 329 release_sock(sk); 330 } 331 return NULL; 332 } 333 334 int iucv_sock_wait_state(struct sock *sk, int state, int state2, 335 unsigned long timeo) 336 { 337 DECLARE_WAITQUEUE(wait, current); 338 int err = 0; 339 340 add_wait_queue(sk->sk_sleep, &wait); 341 while (sk->sk_state != state && sk->sk_state != state2) { 342 set_current_state(TASK_INTERRUPTIBLE); 343 344 if (!timeo) { 345 err = -EAGAIN; 346 break; 347 } 348 349 if (signal_pending(current)) { 350 err = sock_intr_errno(timeo); 351 break; 352 } 353 354 release_sock(sk); 355 timeo = schedule_timeout(timeo); 356 lock_sock(sk); 357 358 err = sock_error(sk); 359 if (err) 360 break; 361 } 362 set_current_state(TASK_RUNNING); 363 remove_wait_queue(sk->sk_sleep, &wait); 364 return err; 365 } 366 367 /* Bind an unbound socket */ 368 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, 369 int addr_len) 370 { 371 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 372 struct sock *sk = sock->sk; 373 struct iucv_sock *iucv; 374 int err; 375 376 /* Verify the input sockaddr */ 377 if (!addr || addr->sa_family != AF_IUCV) 378 return -EINVAL; 379 380 lock_sock(sk); 381 if (sk->sk_state != IUCV_OPEN) { 382 err = -EBADFD; 383 goto done; 384 } 385 386 write_lock_bh(&iucv_sk_list.lock); 387 388 iucv = iucv_sk(sk); 389 if (__iucv_get_sock_by_name(sa->siucv_name)) { 390 err = -EADDRINUSE; 391 goto done_unlock; 392 } 393 if (iucv->path) { 394 err = 0; 395 goto done_unlock; 396 } 397 398 /* Bind the socket */ 399 memcpy(iucv->src_name, sa->siucv_name, 8); 400 401 /* Copy the user id */ 402 memcpy(iucv->src_user_id, iucv_userid, 8); 403 sk->sk_state = IUCV_BOUND; 404 err = 0; 405 406 done_unlock: 407 /* Release the socket list lock */ 408 write_unlock_bh(&iucv_sk_list.lock); 409 done: 410 release_sock(sk); 411 return err; 412 } 413 414 /* Automatically bind an unbound socket */ 415 static int iucv_sock_autobind(struct sock *sk) 416 { 417 struct iucv_sock *iucv = iucv_sk(sk); 418 char query_buffer[80]; 419 char name[12]; 420 int err = 0; 421 422 /* Set the userid and name */ 423 cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err); 424 if (unlikely(err)) 425 return -EPROTO; 426 427 memcpy(iucv->src_user_id, query_buffer, 8); 428 429 write_lock_bh(&iucv_sk_list.lock); 430 431 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); 432 while (__iucv_get_sock_by_name(name)) { 433 sprintf(name, "%08x", 434 atomic_inc_return(&iucv_sk_list.autobind_name)); 435 } 436 437 write_unlock_bh(&iucv_sk_list.lock); 438 439 memcpy(&iucv->src_name, name, 8); 440 441 return err; 442 } 443 444 /* Connect an unconnected socket */ 445 static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, 446 int alen, int flags) 447 { 448 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 449 struct sock *sk = sock->sk; 450 struct iucv_sock *iucv; 451 unsigned char user_data[16]; 452 int err; 453 454 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) 455 return -EINVAL; 456 457 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) 458 return -EBADFD; 459 460 if (sk->sk_type != SOCK_STREAM) 461 return -EINVAL; 462 463 iucv = iucv_sk(sk); 464 465 if (sk->sk_state == IUCV_OPEN) { 466 err = iucv_sock_autobind(sk); 467 if (unlikely(err)) 468 return err; 469 } 470 471 lock_sock(sk); 472 473 /* Set the destination information */ 474 memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8); 475 memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8); 476 477 high_nmcpy(user_data, sa->siucv_name); 478 low_nmcpy(user_data, iucv_sk(sk)->src_name); 479 ASCEBC(user_data, sizeof(user_data)); 480 481 iucv = iucv_sk(sk); 482 /* Create path. */ 483 iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT, 484 IPRMDATA, GFP_KERNEL); 485 if (!iucv->path) { 486 err = -ENOMEM; 487 goto done; 488 } 489 err = iucv_path_connect(iucv->path, &af_iucv_handler, 490 sa->siucv_user_id, NULL, user_data, sk); 491 if (err) { 492 iucv_path_free(iucv->path); 493 iucv->path = NULL; 494 err = -ECONNREFUSED; 495 goto done; 496 } 497 498 if (sk->sk_state != IUCV_CONNECTED) { 499 err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN, 500 sock_sndtimeo(sk, flags & O_NONBLOCK)); 501 } 502 503 if (sk->sk_state == IUCV_DISCONN) { 504 release_sock(sk); 505 return -ECONNREFUSED; 506 } 507 done: 508 release_sock(sk); 509 return err; 510 } 511 512 /* Move a socket into listening state. */ 513 static int iucv_sock_listen(struct socket *sock, int backlog) 514 { 515 struct sock *sk = sock->sk; 516 int err; 517 518 lock_sock(sk); 519 520 err = -EINVAL; 521 if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM) 522 goto done; 523 524 sk->sk_max_ack_backlog = backlog; 525 sk->sk_ack_backlog = 0; 526 sk->sk_state = IUCV_LISTEN; 527 err = 0; 528 529 done: 530 release_sock(sk); 531 return err; 532 } 533 534 /* Accept a pending connection */ 535 static int iucv_sock_accept(struct socket *sock, struct socket *newsock, 536 int flags) 537 { 538 DECLARE_WAITQUEUE(wait, current); 539 struct sock *sk = sock->sk, *nsk; 540 long timeo; 541 int err = 0; 542 543 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 544 545 if (sk->sk_state != IUCV_LISTEN) { 546 err = -EBADFD; 547 goto done; 548 } 549 550 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 551 552 /* Wait for an incoming connection */ 553 add_wait_queue_exclusive(sk->sk_sleep, &wait); 554 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { 555 set_current_state(TASK_INTERRUPTIBLE); 556 if (!timeo) { 557 err = -EAGAIN; 558 break; 559 } 560 561 release_sock(sk); 562 timeo = schedule_timeout(timeo); 563 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 564 565 if (sk->sk_state != IUCV_LISTEN) { 566 err = -EBADFD; 567 break; 568 } 569 570 if (signal_pending(current)) { 571 err = sock_intr_errno(timeo); 572 break; 573 } 574 } 575 576 set_current_state(TASK_RUNNING); 577 remove_wait_queue(sk->sk_sleep, &wait); 578 579 if (err) 580 goto done; 581 582 newsock->state = SS_CONNECTED; 583 584 done: 585 release_sock(sk); 586 return err; 587 } 588 589 static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, 590 int *len, int peer) 591 { 592 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; 593 struct sock *sk = sock->sk; 594 595 addr->sa_family = AF_IUCV; 596 *len = sizeof(struct sockaddr_iucv); 597 598 if (peer) { 599 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8); 600 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8); 601 } else { 602 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8); 603 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8); 604 } 605 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); 606 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); 607 memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); 608 609 return 0; 610 } 611 612 static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, 613 struct msghdr *msg, size_t len) 614 { 615 struct sock *sk = sock->sk; 616 struct iucv_sock *iucv = iucv_sk(sk); 617 struct sk_buff *skb; 618 struct iucv_message txmsg; 619 int err; 620 621 err = sock_error(sk); 622 if (err) 623 return err; 624 625 if (msg->msg_flags & MSG_OOB) 626 return -EOPNOTSUPP; 627 628 lock_sock(sk); 629 630 if (sk->sk_shutdown & SEND_SHUTDOWN) { 631 err = -EPIPE; 632 goto out; 633 } 634 635 if (sk->sk_state == IUCV_CONNECTED) { 636 if (!(skb = sock_alloc_send_skb(sk, len, 637 msg->msg_flags & MSG_DONTWAIT, 638 &err))) 639 goto out; 640 641 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 642 err = -EFAULT; 643 goto fail; 644 } 645 646 txmsg.class = 0; 647 txmsg.tag = iucv->send_tag++; 648 memcpy(skb->cb, &txmsg.tag, 4); 649 skb_queue_tail(&iucv->send_skb_q, skb); 650 err = iucv_message_send(iucv->path, &txmsg, 0, 0, 651 (void *) skb->data, skb->len); 652 if (err) { 653 if (err == 3) 654 printk(KERN_ERR "AF_IUCV msg limit exceeded\n"); 655 skb_unlink(skb, &iucv->send_skb_q); 656 err = -EPIPE; 657 goto fail; 658 } 659 660 } else { 661 err = -ENOTCONN; 662 goto out; 663 } 664 665 release_sock(sk); 666 return len; 667 668 fail: 669 kfree_skb(skb); 670 out: 671 release_sock(sk); 672 return err; 673 } 674 675 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) 676 { 677 int dataleft, size, copied = 0; 678 struct sk_buff *nskb; 679 680 dataleft = len; 681 while (dataleft) { 682 if (dataleft >= sk->sk_rcvbuf / 4) 683 size = sk->sk_rcvbuf / 4; 684 else 685 size = dataleft; 686 687 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); 688 if (!nskb) 689 return -ENOMEM; 690 691 memcpy(nskb->data, skb->data + copied, size); 692 copied += size; 693 dataleft -= size; 694 695 skb_reset_transport_header(nskb); 696 skb_reset_network_header(nskb); 697 nskb->len = size; 698 699 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); 700 } 701 702 return 0; 703 } 704 705 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, 706 struct iucv_path *path, 707 struct iucv_message *msg) 708 { 709 int rc; 710 711 if (msg->flags & IPRMDATA) { 712 skb->data = NULL; 713 skb->len = 0; 714 } else { 715 rc = iucv_message_receive(path, msg, 0, skb->data, 716 msg->length, NULL); 717 if (rc) { 718 kfree_skb(skb); 719 return; 720 } 721 if (skb->truesize >= sk->sk_rcvbuf / 4) { 722 rc = iucv_fragment_skb(sk, skb, msg->length); 723 kfree_skb(skb); 724 skb = NULL; 725 if (rc) { 726 iucv_path_sever(path, NULL); 727 return; 728 } 729 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); 730 } else { 731 skb_reset_transport_header(skb); 732 skb_reset_network_header(skb); 733 skb->len = msg->length; 734 } 735 } 736 737 if (sock_queue_rcv_skb(sk, skb)) 738 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); 739 } 740 741 static void iucv_process_message_q(struct sock *sk) 742 { 743 struct iucv_sock *iucv = iucv_sk(sk); 744 struct sk_buff *skb; 745 struct sock_msg_q *p, *n; 746 747 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 748 skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA); 749 if (!skb) 750 break; 751 iucv_process_message(sk, skb, p->path, &p->msg); 752 list_del(&p->list); 753 kfree(p); 754 if (!skb_queue_empty(&iucv->backlog_skb_q)) 755 break; 756 } 757 } 758 759 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 760 struct msghdr *msg, size_t len, int flags) 761 { 762 int noblock = flags & MSG_DONTWAIT; 763 struct sock *sk = sock->sk; 764 struct iucv_sock *iucv = iucv_sk(sk); 765 int target, copied = 0; 766 struct sk_buff *skb, *rskb, *cskb; 767 int err = 0; 768 769 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && 770 skb_queue_empty(&iucv->backlog_skb_q) && 771 skb_queue_empty(&sk->sk_receive_queue) && 772 list_empty(&iucv->message_q.list)) 773 return 0; 774 775 if (flags & (MSG_OOB)) 776 return -EOPNOTSUPP; 777 778 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 779 780 skb = skb_recv_datagram(sk, flags, noblock, &err); 781 if (!skb) { 782 if (sk->sk_shutdown & RCV_SHUTDOWN) 783 return 0; 784 return err; 785 } 786 787 copied = min_t(unsigned int, skb->len, len); 788 789 cskb = skb; 790 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) { 791 skb_queue_head(&sk->sk_receive_queue, skb); 792 if (copied == 0) 793 return -EFAULT; 794 goto done; 795 } 796 797 len -= copied; 798 799 /* Mark read part of skb as used */ 800 if (!(flags & MSG_PEEK)) { 801 skb_pull(skb, copied); 802 803 if (skb->len) { 804 skb_queue_head(&sk->sk_receive_queue, skb); 805 goto done; 806 } 807 808 kfree_skb(skb); 809 810 /* Queue backlog skbs */ 811 rskb = skb_dequeue(&iucv->backlog_skb_q); 812 while (rskb) { 813 if (sock_queue_rcv_skb(sk, rskb)) { 814 skb_queue_head(&iucv->backlog_skb_q, 815 rskb); 816 break; 817 } else { 818 rskb = skb_dequeue(&iucv->backlog_skb_q); 819 } 820 } 821 if (skb_queue_empty(&iucv->backlog_skb_q)) { 822 spin_lock_bh(&iucv->message_q.lock); 823 if (!list_empty(&iucv->message_q.list)) 824 iucv_process_message_q(sk); 825 spin_unlock_bh(&iucv->message_q.lock); 826 } 827 828 } else 829 skb_queue_head(&sk->sk_receive_queue, skb); 830 831 done: 832 return err ? : copied; 833 } 834 835 static inline unsigned int iucv_accept_poll(struct sock *parent) 836 { 837 struct iucv_sock *isk, *n; 838 struct sock *sk; 839 840 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { 841 sk = (struct sock *) isk; 842 843 if (sk->sk_state == IUCV_CONNECTED) 844 return POLLIN | POLLRDNORM; 845 } 846 847 return 0; 848 } 849 850 unsigned int iucv_sock_poll(struct file *file, struct socket *sock, 851 poll_table *wait) 852 { 853 struct sock *sk = sock->sk; 854 unsigned int mask = 0; 855 856 poll_wait(file, sk->sk_sleep, wait); 857 858 if (sk->sk_state == IUCV_LISTEN) 859 return iucv_accept_poll(sk); 860 861 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 862 mask |= POLLERR; 863 864 if (sk->sk_shutdown & RCV_SHUTDOWN) 865 mask |= POLLRDHUP; 866 867 if (sk->sk_shutdown == SHUTDOWN_MASK) 868 mask |= POLLHUP; 869 870 if (!skb_queue_empty(&sk->sk_receive_queue) || 871 (sk->sk_shutdown & RCV_SHUTDOWN)) 872 mask |= POLLIN | POLLRDNORM; 873 874 if (sk->sk_state == IUCV_CLOSED) 875 mask |= POLLHUP; 876 877 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) 878 mask |= POLLIN; 879 880 if (sock_writeable(sk)) 881 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 882 else 883 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 884 885 return mask; 886 } 887 888 static int iucv_sock_shutdown(struct socket *sock, int how) 889 { 890 struct sock *sk = sock->sk; 891 struct iucv_sock *iucv = iucv_sk(sk); 892 struct iucv_message txmsg; 893 int err = 0; 894 u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 895 896 how++; 897 898 if ((how & ~SHUTDOWN_MASK) || !how) 899 return -EINVAL; 900 901 lock_sock(sk); 902 switch (sk->sk_state) { 903 case IUCV_CLOSED: 904 err = -ENOTCONN; 905 goto fail; 906 907 default: 908 sk->sk_shutdown |= how; 909 break; 910 } 911 912 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 913 txmsg.class = 0; 914 txmsg.tag = 0; 915 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 916 (void *) prmmsg, 8); 917 if (err) { 918 switch (err) { 919 case 1: 920 err = -ENOTCONN; 921 break; 922 case 2: 923 err = -ECONNRESET; 924 break; 925 default: 926 err = -ENOTCONN; 927 break; 928 } 929 } 930 } 931 932 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 933 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL); 934 if (err) 935 err = -ENOTCONN; 936 937 skb_queue_purge(&sk->sk_receive_queue); 938 } 939 940 /* Wake up anyone sleeping in poll */ 941 sk->sk_state_change(sk); 942 943 fail: 944 release_sock(sk); 945 return err; 946 } 947 948 static int iucv_sock_release(struct socket *sock) 949 { 950 struct sock *sk = sock->sk; 951 int err = 0; 952 953 if (!sk) 954 return 0; 955 956 iucv_sock_close(sk); 957 958 /* Unregister with IUCV base support */ 959 if (iucv_sk(sk)->path) { 960 iucv_path_sever(iucv_sk(sk)->path, NULL); 961 iucv_path_free(iucv_sk(sk)->path); 962 iucv_sk(sk)->path = NULL; 963 } 964 965 sock_orphan(sk); 966 iucv_sock_kill(sk); 967 return err; 968 } 969 970 /* Callback wrappers - called from iucv base support */ 971 static int iucv_callback_connreq(struct iucv_path *path, 972 u8 ipvmid[8], u8 ipuser[16]) 973 { 974 unsigned char user_data[16]; 975 unsigned char nuser_data[16]; 976 unsigned char src_name[8]; 977 struct hlist_node *node; 978 struct sock *sk, *nsk; 979 struct iucv_sock *iucv, *niucv; 980 int err; 981 982 memcpy(src_name, ipuser, 8); 983 EBCASC(src_name, 8); 984 /* Find out if this path belongs to af_iucv. */ 985 read_lock(&iucv_sk_list.lock); 986 iucv = NULL; 987 sk = NULL; 988 sk_for_each(sk, node, &iucv_sk_list.head) 989 if (sk->sk_state == IUCV_LISTEN && 990 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { 991 /* 992 * Found a listening socket with 993 * src_name == ipuser[0-7]. 994 */ 995 iucv = iucv_sk(sk); 996 break; 997 } 998 read_unlock(&iucv_sk_list.lock); 999 if (!iucv) 1000 /* No socket found, not one of our paths. */ 1001 return -EINVAL; 1002 1003 bh_lock_sock(sk); 1004 1005 /* Check if parent socket is listening */ 1006 low_nmcpy(user_data, iucv->src_name); 1007 high_nmcpy(user_data, iucv->dst_name); 1008 ASCEBC(user_data, sizeof(user_data)); 1009 if (sk->sk_state != IUCV_LISTEN) { 1010 err = iucv_path_sever(path, user_data); 1011 goto fail; 1012 } 1013 1014 /* Check for backlog size */ 1015 if (sk_acceptq_is_full(sk)) { 1016 err = iucv_path_sever(path, user_data); 1017 goto fail; 1018 } 1019 1020 /* Create the new socket */ 1021 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); 1022 if (!nsk) { 1023 err = iucv_path_sever(path, user_data); 1024 goto fail; 1025 } 1026 1027 niucv = iucv_sk(nsk); 1028 iucv_sock_init(nsk, sk); 1029 1030 /* Set the new iucv_sock */ 1031 memcpy(niucv->dst_name, ipuser + 8, 8); 1032 EBCASC(niucv->dst_name, 8); 1033 memcpy(niucv->dst_user_id, ipvmid, 8); 1034 memcpy(niucv->src_name, iucv->src_name, 8); 1035 memcpy(niucv->src_user_id, iucv->src_user_id, 8); 1036 niucv->path = path; 1037 1038 /* Call iucv_accept */ 1039 high_nmcpy(nuser_data, ipuser + 8); 1040 memcpy(nuser_data + 8, niucv->src_name, 8); 1041 ASCEBC(nuser_data + 8, 8); 1042 1043 path->msglim = IUCV_QUEUELEN_DEFAULT; 1044 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 1045 if (err) { 1046 err = iucv_path_sever(path, user_data); 1047 goto fail; 1048 } 1049 1050 iucv_accept_enqueue(sk, nsk); 1051 1052 /* Wake up accept */ 1053 nsk->sk_state = IUCV_CONNECTED; 1054 sk->sk_data_ready(sk, 1); 1055 err = 0; 1056 fail: 1057 bh_unlock_sock(sk); 1058 return 0; 1059 } 1060 1061 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) 1062 { 1063 struct sock *sk = path->private; 1064 1065 sk->sk_state = IUCV_CONNECTED; 1066 sk->sk_state_change(sk); 1067 } 1068 1069 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) 1070 { 1071 struct sock *sk = path->private; 1072 struct iucv_sock *iucv = iucv_sk(sk); 1073 struct sk_buff *skb; 1074 struct sock_msg_q *save_msg; 1075 int len; 1076 1077 if (sk->sk_shutdown & RCV_SHUTDOWN) 1078 return; 1079 1080 if (!list_empty(&iucv->message_q.list) || 1081 !skb_queue_empty(&iucv->backlog_skb_q)) 1082 goto save_message; 1083 1084 len = atomic_read(&sk->sk_rmem_alloc); 1085 len += msg->length + sizeof(struct sk_buff); 1086 if (len > sk->sk_rcvbuf) 1087 goto save_message; 1088 1089 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); 1090 if (!skb) 1091 goto save_message; 1092 1093 spin_lock(&iucv->message_q.lock); 1094 iucv_process_message(sk, skb, path, msg); 1095 spin_unlock(&iucv->message_q.lock); 1096 1097 return; 1098 1099 save_message: 1100 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); 1101 if (!save_msg) 1102 return; 1103 save_msg->path = path; 1104 save_msg->msg = *msg; 1105 1106 spin_lock(&iucv->message_q.lock); 1107 list_add_tail(&save_msg->list, &iucv->message_q.list); 1108 spin_unlock(&iucv->message_q.lock); 1109 } 1110 1111 static void iucv_callback_txdone(struct iucv_path *path, 1112 struct iucv_message *msg) 1113 { 1114 struct sock *sk = path->private; 1115 struct sk_buff *this = NULL; 1116 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; 1117 struct sk_buff *list_skb = list->next; 1118 unsigned long flags; 1119 1120 if (!skb_queue_empty(list)) { 1121 spin_lock_irqsave(&list->lock, flags); 1122 1123 while (list_skb != (struct sk_buff *)list) { 1124 if (!memcmp(&msg->tag, list_skb->cb, 4)) { 1125 this = list_skb; 1126 break; 1127 } 1128 list_skb = list_skb->next; 1129 } 1130 if (this) 1131 __skb_unlink(this, list); 1132 1133 spin_unlock_irqrestore(&list->lock, flags); 1134 1135 if (this) 1136 kfree_skb(this); 1137 } 1138 if (!this) 1139 printk(KERN_ERR "AF_IUCV msg tag %u not found\n", msg->tag); 1140 1141 if (sk->sk_state == IUCV_CLOSING) { 1142 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { 1143 sk->sk_state = IUCV_CLOSED; 1144 sk->sk_state_change(sk); 1145 } 1146 } 1147 1148 } 1149 1150 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) 1151 { 1152 struct sock *sk = path->private; 1153 1154 if (!list_empty(&iucv_sk(sk)->accept_q)) 1155 sk->sk_state = IUCV_SEVERED; 1156 else 1157 sk->sk_state = IUCV_DISCONN; 1158 1159 sk->sk_state_change(sk); 1160 } 1161 1162 static struct proto_ops iucv_sock_ops = { 1163 .family = PF_IUCV, 1164 .owner = THIS_MODULE, 1165 .release = iucv_sock_release, 1166 .bind = iucv_sock_bind, 1167 .connect = iucv_sock_connect, 1168 .listen = iucv_sock_listen, 1169 .accept = iucv_sock_accept, 1170 .getname = iucv_sock_getname, 1171 .sendmsg = iucv_sock_sendmsg, 1172 .recvmsg = iucv_sock_recvmsg, 1173 .poll = iucv_sock_poll, 1174 .ioctl = sock_no_ioctl, 1175 .mmap = sock_no_mmap, 1176 .socketpair = sock_no_socketpair, 1177 .shutdown = iucv_sock_shutdown, 1178 .setsockopt = sock_no_setsockopt, 1179 .getsockopt = sock_no_getsockopt 1180 }; 1181 1182 static struct net_proto_family iucv_sock_family_ops = { 1183 .family = AF_IUCV, 1184 .owner = THIS_MODULE, 1185 .create = iucv_sock_create, 1186 }; 1187 1188 static int __init afiucv_init(void) 1189 { 1190 int err; 1191 1192 if (!MACHINE_IS_VM) { 1193 printk(KERN_ERR "AF_IUCV connection needs VM as base\n"); 1194 err = -EPROTONOSUPPORT; 1195 goto out; 1196 } 1197 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); 1198 if (unlikely(err)) { 1199 printk(KERN_ERR "AF_IUCV needs the VM userid\n"); 1200 err = -EPROTONOSUPPORT; 1201 goto out; 1202 } 1203 1204 err = iucv_register(&af_iucv_handler, 0); 1205 if (err) 1206 goto out; 1207 err = proto_register(&iucv_proto, 0); 1208 if (err) 1209 goto out_iucv; 1210 err = sock_register(&iucv_sock_family_ops); 1211 if (err) 1212 goto out_proto; 1213 printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n"); 1214 return 0; 1215 1216 out_proto: 1217 proto_unregister(&iucv_proto); 1218 out_iucv: 1219 iucv_unregister(&af_iucv_handler, 0); 1220 out: 1221 return err; 1222 } 1223 1224 static void __exit afiucv_exit(void) 1225 { 1226 sock_unregister(PF_IUCV); 1227 proto_unregister(&iucv_proto); 1228 iucv_unregister(&af_iucv_handler, 0); 1229 1230 printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n"); 1231 } 1232 1233 module_init(afiucv_init); 1234 module_exit(afiucv_exit); 1235 1236 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>"); 1237 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); 1238 MODULE_VERSION(VERSION); 1239 MODULE_LICENSE("GPL"); 1240 MODULE_ALIAS_NETPROTO(PF_IUCV); 1241