/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 * PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further, if the socket
 * data len is greater than 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
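
/* Worked example of the IPRM length convention above (illustrative only):
 * a sender storing 5 bytes of socket data puts 0xff - 5 = 0xfa into
 * PRMDATA[7], and iucv_msg_length() recovers 0xff - 0xfa = 5.  The special
 * iprm_shutdown pattern has PRMDATA[7] == 0x01, which decodes to a "length"
 * of 0xfe; anything greater than 7 is capped to a return value of 8.
 */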

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
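
/* Descriptive sketch of the frame that afiucv_hs_send() below builds in
 * front of the message data (no new wire format is defined here):
 *
 *	+-----------------+--------------------------+--------------+
 *	| zeroed ETH_HLEN | struct af_iucv_trans_hdr | message data |
 *	+-----------------+--------------------------+--------------+
 *
 * User IDs and application names are converted to EBCDIC in the header,
 * and the window field piggybacks receive confirmations (AF_IUCV_FLAG_WIN)
 * onto outgoing data frames.
 */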

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}
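
/* Note on iucv_sock_close() below: the switch cascades deliberately, so a
 * CONNECTED socket runs the DISCONN, CLOSING and default actions in turn:
 *
 *	IUCV_CONNECTED -> IUCV_DISCONN -> IUCV_CLOSING -> IUCV_CLOSED
 *
 * with an optional lingering wait for the send queue to drain before the
 * path is finally severed.
 */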
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}
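
/* Userspace view of the bind step below; a minimal sketch only, assuming
 * the sockaddr_iucv layout from <net/iucv/af_iucv.h> and blank-padded
 * 8-character IDs (error handling omitted):
 *
 *	struct sockaddr_iucv sa = { .siucv_family  = AF_IUCV,
 *				    .siucv_user_id = "USERID  ",
 *				    .siucv_name    = "APPNAME " };
 *	int s = socket(AF_IUCV, SOCK_STREAM, 0);
 *	bind(s, (struct sockaddr *) &sa, sizeof(sa));
 *
 * A user ID matching the local z/VM guest selects the VM IUCV transport;
 * one matching the permanent address of a HiperSockets device selects the
 * HIPER transport; a name of eight blanks requests an auto-generated name.
 */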

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
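
/* Matching client-side sketch for the connect path above (illustrative
 * only, same sockaddr_iucv conventions as in the bind example):
 *
 *	struct sockaddr_iucv peer = { .siucv_family  = AF_IUCV,
 *				      .siucv_user_id = "PEERVM  ",
 *				      .siucv_name    = "SRVNAME " };
 *	connect(s, (struct sockaddr *) &peer, sizeof(peer));
 *
 * On the HIPER transport an explicit bind() must precede connect(); on the
 * VM IUCV transport an unbound socket is autobound first.
 */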

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
		linear = len;
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s exceeds message limit\n",
				       appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}
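
/* Note on the receive path below: for messages of PAGE_SIZE or more,
 * alloc_iucv_recv_skb() reserves headroom for a scatter list of
 * struct iucv_array entries (one for the linear part plus one per page
 * fragment).  iucv_process_message() fills this list and passes it to
 * message_receive() with IUCV_IPBUFLST, so large messages are received
 * directly into the paged skb without an extra linear copy.
 */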

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
						      IUCV_IPBUFLST,
						      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
						      msg->flags & IUCV_IPRMDATA,
						      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
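
/* Illustrative use of the socket option above; a sketch only, assuming
 * userspace definitions of SOL_IUCV and SO_MSGLIMIT matching this file:
 *
 *	int msglimit = 128;	// valid range 1..65535
 *	setsockopt(s, SOL_IUCV, SO_MSGLIMIT, &msglimit, sizeof(msglimit));
 *
 * The limit is only accepted while the socket is in IUCV_OPEN or
 * IUCV_BOUND state, i.e. before the IUCV path is established.
 */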

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 * @skb:      the received sk_buff
 * @dev:      the net_device the frame arrived on
 * @pt:       the packet_type that matched (ETH_P_AF_IUCV)
 * @orig_dev: the original receiving net_device
 *
 * Called from the netif RX softirq.
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
		WARN_ONCE(1, "AF_IUCV: skb too short, len=%d, min=%d",
			  (int)skb->len,
			  (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
		if (skb_linearize(skb)) {
			WARN_ONCE(1, "AF_IUCV: skb_linearize failed, len=%d",
				  (int)skb->len);
			kfree_skb(skb);
			return NET_RX_SUCCESS;
		}
	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			/* a SYN matches a socket that is bound to the
			 * destination address and not yet connected
			 */
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			/* all other frames match on the full source and
			 * destination address pair
			 */
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no matching socket found:
	 * how should we reply when there is no socket to send from?
	 *  1) send without a socket, with no return-code checking?
	 *  2) introduce a default socket to handle such cases?
	 *
	 * SYN  -> send SYN|ACK in the good case, SYN|FIN in the bad case
	 * data -> send FIN
	 * SYN|ACK, SYN|FIN, FIN -> no action?
	 */
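	/* dispatch on the transport header flags; sk is either the matching
	 * socket or NULL, and every handler below must cope with sk == NULL
	 * (a SYN is answered with SYN|FIN, anything else is dropped)
	 */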
	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		break;
	}

	return err;
}

/**
 * afiucv_hs_callback_txnotify() - handle send notifications from
 *				   HiperSockets transport
 * @skb: the sk_buff whose transmission is being reported
 * @n:   the kind of notification (see enum iucv_tx_notify)
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		/* the queued skb is a clone of the transmitted one, so both
		 * share the same skb_shinfo
		 */
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
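/* Summary of the notifications handled above:
 *	TX_NOTIFY_OK		frame sent - drop it from the send queue
 *				and wake up blocked senders
 *	TX_NOTIFY_PENDING	completion is deferred - just account it
 *	TX_NOTIFY_DELAYED_OK	a deferred completion arrived
 *	any error notification	drop the frame and disconnect the socket
 */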
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll_mask	= iucv_sock_poll_mask,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_iucv_dev;
	return 0;

out_iucv_dev:
	put_device(af_iucv_dev);
out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
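/* Module initialization: the protocol and the socket family are registered
 * first, then the transport-specific pieces; the error labels in
 * afiucv_init() below unwind in reverse order.  The z/VM IUCV backend
 * (pr_iucv) is optional - without it, only the HiperSockets transport is
 * available.
 */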
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	} else
		register_netdevice_notifier(&afiucv_netdev_notifier);
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	} else
		unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
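/* Illustrative sketch only -- not built as part of this module: a minimal
 * userspace client for an AF_IUCV stream socket.  The guest user ids
 * ("LCLUSER ", "SRVUSER ") and application names are placeholders, and
 * struct sockaddr_iucv is declared locally since applications historically
 * provide their own definition; all id/name fields are 8 bytes, padded
 * with blanks.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32
#endif

struct sockaddr_iucv {
	sa_family_t	siucv_family;
	unsigned short	siucv_port;		/* reserved */
	unsigned int	siucv_addr;		/* reserved */
	char		siucv_nodeid[8];	/* reserved */
	char		siucv_user_id[8];	/* guest user id */
	char		siucv_name[8];		/* application name */
};

static int iucv_client_example(void)
{
	struct sockaddr_iucv local = { .siucv_family = AF_IUCV };
	struct sockaddr_iucv peer = { .siucv_family = AF_IUCV };
	int fd;

	fd = socket(AF_IUCV, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	/* bind to the local guest user id and application name */
	memcpy(local.siucv_user_id, "LCLUSER ", 8);
	memcpy(local.siucv_name, "CLIENT  ", 8);
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)))
		goto err;

	/* connect to the peer's user id and application name; over the
	 * HiperSockets transport this triggers the SYN / SYN|ACK
	 * exchange handled above
	 */
	memcpy(peer.siucv_user_id, "SRVUSER ", 8);
	memcpy(peer.siucv_name, "SERVICE ", 8);
	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)))
		goto err;

	return fd;	/* ready for read()/write() */
err:
	close(fd);
	return -1;
}
#endif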