// SPDX-License-Identifier: GPL-2.0-only
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

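/*
 * A minimal, hypothetical user-space client sketch (illustrative only):
 * the sockaddr layout follows iucv_sock_getname() below, and the user ID
 * and application name are blank-padded 8-byte fields, not NUL-terminated
 * C strings.
 *
 *	struct sockaddr_iucv sa;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.siucv_family = AF_IUCV;
 *	memset(sa.siucv_user_id, ' ', 8);
 *	memcpy(sa.siucv_user_id, "LNXGUEST", 8);	(peer z/VM user ID)
 *	memset(sa.siucv_name, ' ', 8);
 *	memcpy(sa.siucv_name, "APPSRV", 6);		(peer application)
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */
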
static char iucv_userid[80];

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;
static struct iucv_handler af_iucv_handler;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg,
 * whether the data is stored in a regular buffer or in the parameter
 * list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown), and the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

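/*
 * Worked example of the convention above: for a 5-byte IPRM payload the
 * sender stores PRMDATA[7] = 0xff - 5 = 0xfa, and iucv_msg_length()
 * computes 0xff - 0xfa = 5. The special iprm_shutdown message carries
 * PRMDATA[7] = 0x01, i.e. a computed length of 0xfe; since that exceeds 7,
 * iucv_msg_length() returns 8 and iucv_process_message() treats the
 * message as a notification rather than socket data.
 */
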
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

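/*
 * Note that the two transports throttle senders differently: the classic
 * VM IUCV transport bounds the number of skbs with outstanding
 * message-complete callbacks (skbs_in_xmit) by the negotiated path message
 * limit, while the HiperSockets transport bounds unconfirmed messages
 * (msg_sent) by the window the peer advertised (msglimit_peer) and also
 * waits until no TX_NOTIFY_PENDING notifications are outstanding.
 */
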
/*
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

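/*
 * On the HiperSockets transport, every frame starts with a struct
 * af_iucv_trans_hdr: magic ETH_P_AF_IUCV, version 1, the AF_IUCV_FLAG_*
 * bits, a flow-control window, the source/destination user IDs and
 * application names (converted to EBCDIC on the wire), and an embedded
 * struct iucv_message for data frames. afiucv_hs_send() below builds this
 * header in front of the payload.
 */
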
/*
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	int err, confirm_recv = 0;

	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
	memset(phs_hdr, 0, sizeof(*phs_hdr));
	skb_reset_network_header(skb);

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}

	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		err = pskb_trim(skb, skb->dev->mtu);
		if (err)
			goto err_free;
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

	atomic_inc(&iucv->skbs_in_xmit);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		atomic_dec(&iucv->skbs_in_xmit);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
		fallthrough;

	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}
		fallthrough;

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		fallthrough;

	default:
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->skbs_in_xmit, 0);
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->init, 0, sizeof(iucv->init));
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}

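/*
 * Transport selection at bind time: if classic IUCV is available and the
 * requested user ID equals the local z/VM user ID, the socket is bound to
 * the VM IUCV transport; otherwise the net devices are scanned for one
 * whose permanent address matches the (EBCDIC-converted) user ID, and the
 * socket is bound to the HiperSockets transport on that device.
 */
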
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	char uid[sizeof(sa->siucv_user_id)];
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		sk->sk_allocation |= GFP_DMA;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);
	iucv->transport = AF_IUCV_TRANS_IUCV;
	sk->sk_allocation |= GFP_DMA;

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

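/*
 * The 16-byte IUCV user data identifies the connection: bytes 0-7 carry
 * the remote application name and bytes 8-15 the local application name
 * (see high_nmcpy()/low_nmcpy() above), converted to EBCDIC before the
 * path is established.
 */
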
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *)skb->data, skb->len);
	prmdata[7] = 0xff - (u8)skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *)prmdata, 8);
}

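/*
 * A minimal, hypothetical user-space sketch of setting the iucv message
 * target class via the SCM_IUCV_TRGCLS ancillary message parsed by
 * iucv_sock_sendmsg() below (TRGCLS_SIZE is the size of the 32-bit class
 * field):
 *
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *	struct msghdr mh = { .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	uint32_t trgcls = 1;
 *
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *	(then attach an iovec and call sendmsg(fd, &mh, 0))
 */
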
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done = 0;	/* check for duplicate headers */

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *)CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) +
			   LL_RESERVED_SPACE(iucv->hs_dev);
		linear = min(len, PAGE_SIZE - headroom);
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit
	 */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);
		atomic_inc(&iucv->skbs_in_xmit);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback
			 * for an IPRMDATA msg; remove skb from send queue
			 */
			if (err == 0) {
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				consume_skb(skb);
			}

			/* this error should never happen since the
			 * IUCV_IPRMDATA path flag is set... sever path
			 */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err(
					"Application %s on z/VM guest %s exceeds message limit\n",
					appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}

			atomic_dec(&iucv->skbs_in_xmit);
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

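/*
 * Allocate a receive skb for an inbound message of the given length. As on
 * the send side, messages of a page or more use a nonlinear skb whose
 * headroom holds the iucv_array descriptors that iucv_process_message()
 * fills in for IUCV_IPBUFLST receives.
 */
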
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

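/*
 * On receive, skbs that do not fit into the socket receive queue are
 * parked on backlog_skb_q and re-queued as the reader drains data. For the
 * HiperSockets transport, every consumed message bumps msg_recv, and once
 * at least half of the local msglimit is unconfirmed, an AF_IUCV_FLAG_WIN
 * credit update is sent back to the peer.
 */
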
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen = skb->len - offset;	/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		consume_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

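/*
 * Note on the how++ below: sk_shutdown flags are RCV_SHUTDOWN (1) and
 * SEND_SHUTDOWN (2). Incrementing the SHUT_RD/SHUT_WR/SHUT_RDWR argument
 * (0/1/2) maps it onto this bit mask, so "how" can be tested and or-ed
 * directly.
 */
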
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
	    sk->sk_state == IUCV_CONNECTED) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *)iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

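/*
 * A minimal, hypothetical user-space sketch: raising the message limit
 * before bind()/connect(), the only states in which SO_MSGLIMIT may be
 * changed (valid values are 1..65535):
 *
 *	int limit = 255;
 *
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */
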
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > U16_MAX)
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

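/*
 * The callbacks registered below run in atomic context on behalf of the
 * IUCV base code (presumably its interrupt tasklet; this is an assumption
 * based on the bh_lock_sock()/GFP_ATOMIC usage here), so none of them may
 * sleep.
 */
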
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_IUCV;
	nsk->sk_allocation |= GFP_DMA;

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct iucv_sock *iucv;
	unsigned long flags;

	iucv = iucv_sk(sk);
	list = &iucv->send_skb_q;

	bh_lock_sock(sk);

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_walk(list, list_skb) {
		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
			this = list_skb;
			break;
		}
	}
	if (this) {
		atomic_dec(&iucv->skbs_in_xmit);
		__skb_unlink(this, list);
	}

	spin_unlock_irqrestore(&list->lock, flags);

	if (this) {
		consume_skb(this);
		/* wake up any process waiting for sending */
		iucv_sock_wake_msglim(sk);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static struct iucv_handler af_iucv_handler = {
	.path_pending		= iucv_callback_connreq,
	.path_complete		= iucv_callback_connack,
	.path_severed		= iucv_callback_connrej,
	.message_pending	= iucv_callback_rx,
	.message_complete	= iucv_callback_txdone,
	.path_quiesced		= iucv_callback_shutdown,
};

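/*
 * The HiperSockets transport replaces IUCV path management with a small
 * TCP-like handshake carried in af_iucv_trans_hdr.flags: SYN opens a
 * connection (SYN|ACK accepted, SYN|FIN refused), FIN closes it, SHT
 * announces a send shutdown, and WIN carries flow-control credits; see
 * the dispatch in afiucv_hs_rcv().
 */
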
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/*
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	iucv = iucv_sk(sk);
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if the receiver acks the xmit, the connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	trans_hdr = iucv_trans_hdr(skb);
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	 * how should we send with no sock
	 * 1) send without sock no send rc checking?
	 * 2) introduce default sock to handle this cases
	 *
	 *  SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 *  data -> send FIN
	 *  SYN|ACK, SYN|FIN, FIN -> no action?
	 */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			consume_skb(skb);
			break;
		}
		fallthrough;	/* and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		fallthrough;	/* and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		kfree_skb(skb);
	}

	return err;
}

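/*
 * TX notifications are delivered by the underlying device driver once the
 * transmit outcome of a frame is known: TX_NOTIFY_OK completes the skb,
 * TX_NOTIFY_PENDING defers completion (TX_NOTIFY_DELAYED_OK resolves it),
 * and any other outcome is treated as a transport failure that disconnects
 * the socket.
 */
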
/*
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 */
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sock_flag(sk, SOCK_ZAPPED))
		return;

	switch (n) {
	case TX_NOTIFY_OK:
		atomic_dec(&iucv->skbs_in_xmit);
		iucv_sock_wake_msglim(sk);
		break;
	case TX_NOTIFY_PENDING:
		atomic_inc(&iucv->pendings);
		break;
	case TX_NOTIFY_DELAYED_OK:
		atomic_dec(&iucv->skbs_in_xmit);
		if (atomic_dec_return(&iucv->pendings) <= 0)
			iucv_sock_wake_msglim(sk);
		break;
	default:
		atomic_dec(&iucv->skbs_in_xmit);
		if (sk->sk_state == IUCV_CONNECTED) {
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = &iucv_if;
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = pr_iucv->iucv_register(&af_iucv_handler, 0);
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv)
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);