/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
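
/* The two helpers below assemble the 16-byte IUCV user data from two 8-byte
 * application names: high_nmcpy() fills bytes 0..7 and low_nmcpy() fills
 * bytes 8..15; callers convert the result to EBCDIC with ASCEBC() before
 * handing it to the IUCV base layer.
 */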

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_SEVERED:
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = iucv_path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_SEVERED:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze	  = afiucv_pm_freeze,
	.thaw	  = afiucv_pm_restore_thaw,
	.restore  = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name  = "afiucv",
	.bus   = &iucv_bus,
	.pm    = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
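
/* Worked example for the IPRM convention above: a sender with 3 bytes of
 * socket data stores them in PRMDATA[0..2] and sets
 * PRMDATA[7] = 0xff - 3 = 0xfc (see iucv_send_iprm()); the receiver then
 * recovers the length as 0xff - 0xfc = 3 in iucv_msg_length().
 */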

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
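
/* Overview of the socket states driving the close logic below (as
 * implemented in this file):
 *
 *	IUCV_OPEN  --bind/autobind-->  IUCV_BOUND  --listen-->  IUCV_LISTEN
 *	IUCV_BOUND --connect--> IUCV_CONNECTED (via path_complete callback)
 *	IUCV_CONNECTED --close--> IUCV_CLOSING --last send done--> IUCV_CLOSED
 *	IUCV_CONNECTED --peer severs path--> IUCV_DISCONN or IUCV_SEVERED
 */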

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}
		/* fall through */

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
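
/* Accept queue helpers: a listening socket keeps not-yet-accepted child
 * sockets on its accept_q; each entry holds a reference (sock_hold()) that
 * iucv_accept_unlink() drops again via sock_put().
 */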

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
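
/* Userspace sketch (illustrative only; it assumes the struct sockaddr_iucv
 * layout from <net/iucv/af_iucv.h> and a hypothetical peer, the z/VM guest
 * "TESTVM" running application "TESTAPPL", both names blank-padded to
 * 8 bytes):
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "TESTVM  ", 8);
 *	memcpy(addr.siucv_name, "TESTAPPL", 8);
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 */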

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
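
/* Ancillary-data sketch (illustrative only): a sender can tag a message with
 * an IUCV target class by passing an SCM_IUCV_TRGCLS control message to
 * sendmsg(); iucv_sock_sendmsg() below copies it into txmsg.class:
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr mh;
 *	struct cmsghdr *cm;
 *	__u32 trgcls = 1;	(example value)
 *
 *	memset(&mh, 0, sizeof(mh));
 *	mh.msg_control = cbuf;
 *	mh.msg_controllen = sizeof(cbuf);
 *	cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(__u32));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *
 * plus the usual msg_iov setup before calling sendmsg(fd, &mh, 0); the
 * receiver gets the class back via the put_cmsg() call in recvmsg().
 */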

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the maximum number of outstanding messages for the
	 * iucv path has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied   += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of the original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q, rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
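
/* shutdown() is called with SHUT_RD (0), SHUT_WR (1) or SHUT_RDWR (2);
 * the increment below maps these onto the kernel masks RCV_SHUTDOWN (1),
 * SEND_SHUTDOWN (2) and SHUTDOWN_MASK (3).
 */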

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}


/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting to send */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	/* establish dummy device */
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_sock;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = &iucv_bus;
	af_iucv_dev->parent = iucv_root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;

	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);