1 /* 2 * IUCV protocol stack for Linux on zSeries 3 * 4 * Copyright IBM Corp. 2006, 2009 5 * 6 * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> 7 * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 8 * PM functions: 9 * Ursula Braun <ursula.braun@de.ibm.com> 10 */ 11 12 #define KMSG_COMPONENT "af_iucv" 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 15 #include <linux/module.h> 16 #include <linux/types.h> 17 #include <linux/list.h> 18 #include <linux/errno.h> 19 #include <linux/kernel.h> 20 #include <linux/sched.h> 21 #include <linux/slab.h> 22 #include <linux/skbuff.h> 23 #include <linux/init.h> 24 #include <linux/poll.h> 25 #include <net/sock.h> 26 #include <asm/ebcdic.h> 27 #include <asm/cpcmd.h> 28 #include <linux/kmod.h> 29 30 #include <net/iucv/af_iucv.h> 31 32 #define VERSION "1.2" 33 34 static char iucv_userid[80]; 35 36 static const struct proto_ops iucv_sock_ops; 37 38 static struct proto iucv_proto = { 39 .name = "AF_IUCV", 40 .owner = THIS_MODULE, 41 .obj_size = sizeof(struct iucv_sock), 42 }; 43 44 static struct iucv_interface *pr_iucv; 45 46 /* special AF_IUCV IPRM messages */ 47 static const u8 iprm_shutdown[8] = 48 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 49 50 #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) 51 52 #define __iucv_sock_wait(sk, condition, timeo, ret) \ 53 do { \ 54 DEFINE_WAIT(__wait); \ 55 long __timeo = timeo; \ 56 ret = 0; \ 57 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \ 58 while (!(condition)) { \ 59 if (!__timeo) { \ 60 ret = -EAGAIN; \ 61 break; \ 62 } \ 63 if (signal_pending(current)) { \ 64 ret = sock_intr_errno(__timeo); \ 65 break; \ 66 } \ 67 release_sock(sk); \ 68 __timeo = schedule_timeout(__timeo); \ 69 lock_sock(sk); \ 70 ret = sock_error(sk); \ 71 if (ret) \ 72 break; \ 73 } \ 74 finish_wait(sk_sleep(sk), &__wait); \ 75 } while (0) 76 77 #define iucv_sock_wait(sk, condition, timeo) \ 78 ({ \ 79 int __ret = 0; \ 80 if (!(condition)) \ 81 __iucv_sock_wait(sk, condition, timeo, __ret); \ 82 __ret; \ 83 }) 84 85 static void iucv_sock_kill(struct sock *sk); 86 static void iucv_sock_close(struct sock *sk); 87 static void iucv_sever_path(struct sock *, int); 88 89 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, 90 struct packet_type *pt, struct net_device *orig_dev); 91 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, 92 struct sk_buff *skb, u8 flags); 93 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify); 94 95 /* Call Back functions */ 96 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); 97 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); 98 static void iucv_callback_connack(struct iucv_path *, u8 *); 99 static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *); 100 static void iucv_callback_connrej(struct iucv_path *, u8 *); 101 static void iucv_callback_shutdown(struct iucv_path *, u8 *); 102 103 static struct iucv_sock_list iucv_sk_list = { 104 .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), 105 .autobind_name = ATOMIC_INIT(0) 106 }; 107 108 static struct iucv_handler af_iucv_handler = { 109 .path_pending = iucv_callback_connreq, 110 .path_complete = iucv_callback_connack, 111 .path_severed = iucv_callback_connrej, 112 .message_pending = iucv_callback_rx, 113 .message_complete = iucv_callback_txdone, 114 .path_quiesced = iucv_callback_shutdown, 115 }; 116 117 static inline void high_nmcpy(unsigned char *dst, char *src) 118 { 119 memcpy(dst, src, 8); 120 } 121 122 static inline void low_nmcpy(unsigned char *dst, char *src) 123 { 124 memcpy(&dst[8], src, 8); 125 } 126 127 static int afiucv_pm_prepare(struct device *dev) 128 { 129 #ifdef CONFIG_PM_DEBUG 130 printk(KERN_WARNING "afiucv_pm_prepare\n"); 131 #endif 132 return 0; 133 } 134 135 static void afiucv_pm_complete(struct device *dev) 136 { 137 #ifdef CONFIG_PM_DEBUG 138 printk(KERN_WARNING "afiucv_pm_complete\n"); 139 #endif 140 } 141 142 /** 143 * afiucv_pm_freeze() - Freeze PM callback 144 * @dev: AFIUCV dummy device 145 * 146 * Sever all established IUCV communication pathes 147 */ 148 static int afiucv_pm_freeze(struct device *dev) 149 { 150 struct iucv_sock *iucv; 151 struct sock *sk; 152 int err = 0; 153 154 #ifdef CONFIG_PM_DEBUG 155 printk(KERN_WARNING "afiucv_pm_freeze\n"); 156 #endif 157 read_lock(&iucv_sk_list.lock); 158 sk_for_each(sk, &iucv_sk_list.head) { 159 iucv = iucv_sk(sk); 160 switch (sk->sk_state) { 161 case IUCV_DISCONN: 162 case IUCV_CLOSING: 163 case IUCV_CONNECTED: 164 iucv_sever_path(sk, 0); 165 break; 166 case IUCV_OPEN: 167 case IUCV_BOUND: 168 case IUCV_LISTEN: 169 case IUCV_CLOSED: 170 default: 171 break; 172 } 173 skb_queue_purge(&iucv->send_skb_q); 174 skb_queue_purge(&iucv->backlog_skb_q); 175 } 176 read_unlock(&iucv_sk_list.lock); 177 return err; 178 } 179 180 /** 181 * afiucv_pm_restore_thaw() - Thaw and restore PM callback 182 * @dev: AFIUCV dummy device 183 * 184 * socket clean up after freeze 185 */ 186 static int afiucv_pm_restore_thaw(struct device *dev) 187 { 188 struct sock *sk; 189 190 #ifdef CONFIG_PM_DEBUG 191 printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); 192 #endif 193 read_lock(&iucv_sk_list.lock); 194 sk_for_each(sk, &iucv_sk_list.head) { 195 switch (sk->sk_state) { 196 case IUCV_CONNECTED: 197 sk->sk_err = EPIPE; 198 sk->sk_state = IUCV_DISCONN; 199 sk->sk_state_change(sk); 200 break; 201 case IUCV_DISCONN: 202 case IUCV_CLOSING: 203 case IUCV_LISTEN: 204 case IUCV_BOUND: 205 case IUCV_OPEN: 206 default: 207 break; 208 } 209 } 210 read_unlock(&iucv_sk_list.lock); 211 return 0; 212 } 213 214 static const struct dev_pm_ops afiucv_pm_ops = { 215 .prepare = afiucv_pm_prepare, 216 .complete = afiucv_pm_complete, 217 .freeze = afiucv_pm_freeze, 218 .thaw = afiucv_pm_restore_thaw, 219 .restore = afiucv_pm_restore_thaw, 220 }; 221 222 static struct device_driver af_iucv_driver = { 223 .owner = THIS_MODULE, 224 .name = "afiucv", 225 .bus = NULL, 226 .pm = &afiucv_pm_ops, 227 }; 228 229 /* dummy device used as trigger for PM functions */ 230 static struct device *af_iucv_dev; 231 232 /** 233 * iucv_msg_length() - Returns the length of an iucv message. 234 * @msg: Pointer to struct iucv_message, MUST NOT be NULL 235 * 236 * The function returns the length of the specified iucv message @msg of data 237 * stored in a buffer and of data stored in the parameter list (PRMDATA). 238 * 239 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket 240 * data: 241 * PRMDATA[0..6] socket data (max 7 bytes); 242 * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) 243 * 244 * The socket data length is computed by subtracting the socket data length 245 * value from 0xFF. 246 * If the socket data len is greater 7, then PRMDATA can be used for special 247 * notifications (see iucv_sock_shutdown); and further, 248 * if the socket data len is > 7, the function returns 8. 249 * 250 * Use this function to allocate socket buffers to store iucv message data. 251 */ 252 static inline size_t iucv_msg_length(struct iucv_message *msg) 253 { 254 size_t datalen; 255 256 if (msg->flags & IUCV_IPRMDATA) { 257 datalen = 0xff - msg->rmmsg[7]; 258 return (datalen < 8) ? datalen : 8; 259 } 260 return msg->length; 261 } 262 263 /** 264 * iucv_sock_in_state() - check for specific states 265 * @sk: sock structure 266 * @state: first iucv sk state 267 * @state: second iucv sk state 268 * 269 * Returns true if the socket in either in the first or second state. 270 */ 271 static int iucv_sock_in_state(struct sock *sk, int state, int state2) 272 { 273 return (sk->sk_state == state || sk->sk_state == state2); 274 } 275 276 /** 277 * iucv_below_msglim() - function to check if messages can be sent 278 * @sk: sock structure 279 * 280 * Returns true if the send queue length is lower than the message limit. 281 * Always returns true if the socket is not connected (no iucv path for 282 * checking the message limit). 283 */ 284 static inline int iucv_below_msglim(struct sock *sk) 285 { 286 struct iucv_sock *iucv = iucv_sk(sk); 287 288 if (sk->sk_state != IUCV_CONNECTED) 289 return 1; 290 if (iucv->transport == AF_IUCV_TRANS_IUCV) 291 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); 292 else 293 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && 294 (atomic_read(&iucv->pendings) <= 0)); 295 } 296 297 /** 298 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit 299 */ 300 static void iucv_sock_wake_msglim(struct sock *sk) 301 { 302 struct socket_wq *wq; 303 304 rcu_read_lock(); 305 wq = rcu_dereference(sk->sk_wq); 306 if (skwq_has_sleeper(wq)) 307 wake_up_interruptible_all(&wq->wait); 308 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 309 rcu_read_unlock(); 310 } 311 312 /** 313 * afiucv_hs_send() - send a message through HiperSockets transport 314 */ 315 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, 316 struct sk_buff *skb, u8 flags) 317 { 318 struct iucv_sock *iucv = iucv_sk(sock); 319 struct af_iucv_trans_hdr *phs_hdr; 320 struct sk_buff *nskb; 321 int err, confirm_recv = 0; 322 323 memset(skb->head, 0, ETH_HLEN); 324 phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb, 325 sizeof(struct af_iucv_trans_hdr)); 326 skb_reset_mac_header(skb); 327 skb_reset_network_header(skb); 328 skb_push(skb, ETH_HLEN); 329 skb_reset_mac_header(skb); 330 memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr)); 331 332 phs_hdr->magic = ETH_P_AF_IUCV; 333 phs_hdr->version = 1; 334 phs_hdr->flags = flags; 335 if (flags == AF_IUCV_FLAG_SYN) 336 phs_hdr->window = iucv->msglimit; 337 else if ((flags == AF_IUCV_FLAG_WIN) || !flags) { 338 confirm_recv = atomic_read(&iucv->msg_recv); 339 phs_hdr->window = confirm_recv; 340 if (confirm_recv) 341 phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN; 342 } 343 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8); 344 memcpy(phs_hdr->destAppName, iucv->dst_name, 8); 345 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8); 346 memcpy(phs_hdr->srcAppName, iucv->src_name, 8); 347 ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID)); 348 ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName)); 349 ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID)); 350 ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName)); 351 if (imsg) 352 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); 353 354 skb->dev = iucv->hs_dev; 355 if (!skb->dev) 356 return -ENODEV; 357 if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) 358 return -ENETDOWN; 359 if (skb->len > skb->dev->mtu) { 360 if (sock->sk_type == SOCK_SEQPACKET) 361 return -EMSGSIZE; 362 else 363 skb_trim(skb, skb->dev->mtu); 364 } 365 skb->protocol = ETH_P_AF_IUCV; 366 nskb = skb_clone(skb, GFP_ATOMIC); 367 if (!nskb) 368 return -ENOMEM; 369 skb_queue_tail(&iucv->send_skb_q, nskb); 370 err = dev_queue_xmit(skb); 371 if (net_xmit_eval(err)) { 372 skb_unlink(nskb, &iucv->send_skb_q); 373 kfree_skb(nskb); 374 } else { 375 atomic_sub(confirm_recv, &iucv->msg_recv); 376 WARN_ON(atomic_read(&iucv->msg_recv) < 0); 377 } 378 return net_xmit_eval(err); 379 } 380 381 static struct sock *__iucv_get_sock_by_name(char *nm) 382 { 383 struct sock *sk; 384 385 sk_for_each(sk, &iucv_sk_list.head) 386 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) 387 return sk; 388 389 return NULL; 390 } 391 392 static void iucv_sock_destruct(struct sock *sk) 393 { 394 skb_queue_purge(&sk->sk_receive_queue); 395 skb_queue_purge(&sk->sk_error_queue); 396 397 sk_mem_reclaim(sk); 398 399 if (!sock_flag(sk, SOCK_DEAD)) { 400 pr_err("Attempt to release alive iucv socket %p\n", sk); 401 return; 402 } 403 404 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 405 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 406 WARN_ON(sk->sk_wmem_queued); 407 WARN_ON(sk->sk_forward_alloc); 408 } 409 410 /* Cleanup Listen */ 411 static void iucv_sock_cleanup_listen(struct sock *parent) 412 { 413 struct sock *sk; 414 415 /* Close non-accepted connections */ 416 while ((sk = iucv_accept_dequeue(parent, NULL))) { 417 iucv_sock_close(sk); 418 iucv_sock_kill(sk); 419 } 420 421 parent->sk_state = IUCV_CLOSED; 422 } 423 424 /* Kill socket (only if zapped and orphaned) */ 425 static void iucv_sock_kill(struct sock *sk) 426 { 427 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 428 return; 429 430 iucv_sock_unlink(&iucv_sk_list, sk); 431 sock_set_flag(sk, SOCK_DEAD); 432 sock_put(sk); 433 } 434 435 /* Terminate an IUCV path */ 436 static void iucv_sever_path(struct sock *sk, int with_user_data) 437 { 438 unsigned char user_data[16]; 439 struct iucv_sock *iucv = iucv_sk(sk); 440 struct iucv_path *path = iucv->path; 441 442 if (iucv->path) { 443 iucv->path = NULL; 444 if (with_user_data) { 445 low_nmcpy(user_data, iucv->src_name); 446 high_nmcpy(user_data, iucv->dst_name); 447 ASCEBC(user_data, sizeof(user_data)); 448 pr_iucv->path_sever(path, user_data); 449 } else 450 pr_iucv->path_sever(path, NULL); 451 iucv_path_free(path); 452 } 453 } 454 455 /* Send FIN through an IUCV socket for HIPER transport */ 456 static int iucv_send_ctrl(struct sock *sk, u8 flags) 457 { 458 int err = 0; 459 int blen; 460 struct sk_buff *skb; 461 462 blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; 463 skb = sock_alloc_send_skb(sk, blen, 1, &err); 464 if (skb) { 465 skb_reserve(skb, blen); 466 err = afiucv_hs_send(NULL, sk, skb, flags); 467 } 468 return err; 469 } 470 471 /* Close an IUCV socket */ 472 static void iucv_sock_close(struct sock *sk) 473 { 474 struct iucv_sock *iucv = iucv_sk(sk); 475 unsigned long timeo; 476 int err = 0; 477 478 lock_sock(sk); 479 480 switch (sk->sk_state) { 481 case IUCV_LISTEN: 482 iucv_sock_cleanup_listen(sk); 483 break; 484 485 case IUCV_CONNECTED: 486 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 487 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); 488 sk->sk_state = IUCV_DISCONN; 489 sk->sk_state_change(sk); 490 } 491 case IUCV_DISCONN: /* fall through */ 492 sk->sk_state = IUCV_CLOSING; 493 sk->sk_state_change(sk); 494 495 if (!err && !skb_queue_empty(&iucv->send_skb_q)) { 496 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 497 timeo = sk->sk_lingertime; 498 else 499 timeo = IUCV_DISCONN_TIMEOUT; 500 iucv_sock_wait(sk, 501 iucv_sock_in_state(sk, IUCV_CLOSED, 0), 502 timeo); 503 } 504 505 case IUCV_CLOSING: /* fall through */ 506 sk->sk_state = IUCV_CLOSED; 507 sk->sk_state_change(sk); 508 509 sk->sk_err = ECONNRESET; 510 sk->sk_state_change(sk); 511 512 skb_queue_purge(&iucv->send_skb_q); 513 skb_queue_purge(&iucv->backlog_skb_q); 514 515 default: /* fall through */ 516 iucv_sever_path(sk, 1); 517 } 518 519 if (iucv->hs_dev) { 520 dev_put(iucv->hs_dev); 521 iucv->hs_dev = NULL; 522 sk->sk_bound_dev_if = 0; 523 } 524 525 /* mark socket for deletion by iucv_sock_kill() */ 526 sock_set_flag(sk, SOCK_ZAPPED); 527 528 release_sock(sk); 529 } 530 531 static void iucv_sock_init(struct sock *sk, struct sock *parent) 532 { 533 if (parent) 534 sk->sk_type = parent->sk_type; 535 } 536 537 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern) 538 { 539 struct sock *sk; 540 struct iucv_sock *iucv; 541 542 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern); 543 if (!sk) 544 return NULL; 545 iucv = iucv_sk(sk); 546 547 sock_init_data(sock, sk); 548 INIT_LIST_HEAD(&iucv->accept_q); 549 spin_lock_init(&iucv->accept_q_lock); 550 skb_queue_head_init(&iucv->send_skb_q); 551 INIT_LIST_HEAD(&iucv->message_q.list); 552 spin_lock_init(&iucv->message_q.lock); 553 skb_queue_head_init(&iucv->backlog_skb_q); 554 iucv->send_tag = 0; 555 atomic_set(&iucv->pendings, 0); 556 iucv->flags = 0; 557 iucv->msglimit = 0; 558 atomic_set(&iucv->msg_sent, 0); 559 atomic_set(&iucv->msg_recv, 0); 560 iucv->path = NULL; 561 iucv->sk_txnotify = afiucv_hs_callback_txnotify; 562 memset(&iucv->src_user_id , 0, 32); 563 if (pr_iucv) 564 iucv->transport = AF_IUCV_TRANS_IUCV; 565 else 566 iucv->transport = AF_IUCV_TRANS_HIPER; 567 568 sk->sk_destruct = iucv_sock_destruct; 569 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 570 sk->sk_allocation = GFP_DMA; 571 572 sock_reset_flag(sk, SOCK_ZAPPED); 573 574 sk->sk_protocol = proto; 575 sk->sk_state = IUCV_OPEN; 576 577 iucv_sock_link(&iucv_sk_list, sk); 578 return sk; 579 } 580 581 /* Create an IUCV socket */ 582 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol, 583 int kern) 584 { 585 struct sock *sk; 586 587 if (protocol && protocol != PF_IUCV) 588 return -EPROTONOSUPPORT; 589 590 sock->state = SS_UNCONNECTED; 591 592 switch (sock->type) { 593 case SOCK_STREAM: 594 sock->ops = &iucv_sock_ops; 595 break; 596 case SOCK_SEQPACKET: 597 /* currently, proto ops can handle both sk types */ 598 sock->ops = &iucv_sock_ops; 599 break; 600 default: 601 return -ESOCKTNOSUPPORT; 602 } 603 604 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); 605 if (!sk) 606 return -ENOMEM; 607 608 iucv_sock_init(sk, NULL); 609 610 return 0; 611 } 612 613 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) 614 { 615 write_lock_bh(&l->lock); 616 sk_add_node(sk, &l->head); 617 write_unlock_bh(&l->lock); 618 } 619 620 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) 621 { 622 write_lock_bh(&l->lock); 623 sk_del_node_init(sk); 624 write_unlock_bh(&l->lock); 625 } 626 627 void iucv_accept_enqueue(struct sock *parent, struct sock *sk) 628 { 629 unsigned long flags; 630 struct iucv_sock *par = iucv_sk(parent); 631 632 sock_hold(sk); 633 spin_lock_irqsave(&par->accept_q_lock, flags); 634 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); 635 spin_unlock_irqrestore(&par->accept_q_lock, flags); 636 iucv_sk(sk)->parent = parent; 637 sk_acceptq_added(parent); 638 } 639 640 void iucv_accept_unlink(struct sock *sk) 641 { 642 unsigned long flags; 643 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); 644 645 spin_lock_irqsave(&par->accept_q_lock, flags); 646 list_del_init(&iucv_sk(sk)->accept_q); 647 spin_unlock_irqrestore(&par->accept_q_lock, flags); 648 sk_acceptq_removed(iucv_sk(sk)->parent); 649 iucv_sk(sk)->parent = NULL; 650 sock_put(sk); 651 } 652 653 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) 654 { 655 struct iucv_sock *isk, *n; 656 struct sock *sk; 657 658 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { 659 sk = (struct sock *) isk; 660 lock_sock(sk); 661 662 if (sk->sk_state == IUCV_CLOSED) { 663 iucv_accept_unlink(sk); 664 release_sock(sk); 665 continue; 666 } 667 668 if (sk->sk_state == IUCV_CONNECTED || 669 sk->sk_state == IUCV_DISCONN || 670 !newsock) { 671 iucv_accept_unlink(sk); 672 if (newsock) 673 sock_graft(sk, newsock); 674 675 release_sock(sk); 676 return sk; 677 } 678 679 release_sock(sk); 680 } 681 return NULL; 682 } 683 684 static void __iucv_auto_name(struct iucv_sock *iucv) 685 { 686 char name[12]; 687 688 sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); 689 while (__iucv_get_sock_by_name(name)) { 690 sprintf(name, "%08x", 691 atomic_inc_return(&iucv_sk_list.autobind_name)); 692 } 693 memcpy(iucv->src_name, name, 8); 694 } 695 696 /* Bind an unbound socket */ 697 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, 698 int addr_len) 699 { 700 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 701 struct sock *sk = sock->sk; 702 struct iucv_sock *iucv; 703 int err = 0; 704 struct net_device *dev; 705 char uid[9]; 706 707 /* Verify the input sockaddr */ 708 if (!addr || addr->sa_family != AF_IUCV) 709 return -EINVAL; 710 711 if (addr_len < sizeof(struct sockaddr_iucv)) 712 return -EINVAL; 713 714 lock_sock(sk); 715 if (sk->sk_state != IUCV_OPEN) { 716 err = -EBADFD; 717 goto done; 718 } 719 720 write_lock_bh(&iucv_sk_list.lock); 721 722 iucv = iucv_sk(sk); 723 if (__iucv_get_sock_by_name(sa->siucv_name)) { 724 err = -EADDRINUSE; 725 goto done_unlock; 726 } 727 if (iucv->path) 728 goto done_unlock; 729 730 /* Bind the socket */ 731 if (pr_iucv) 732 if (!memcmp(sa->siucv_user_id, iucv_userid, 8)) 733 goto vm_bind; /* VM IUCV transport */ 734 735 /* try hiper transport */ 736 memcpy(uid, sa->siucv_user_id, sizeof(uid)); 737 ASCEBC(uid, 8); 738 rcu_read_lock(); 739 for_each_netdev_rcu(&init_net, dev) { 740 if (!memcmp(dev->perm_addr, uid, 8)) { 741 memcpy(iucv->src_user_id, sa->siucv_user_id, 8); 742 /* Check for unitialized siucv_name */ 743 if (strncmp(sa->siucv_name, " ", 8) == 0) 744 __iucv_auto_name(iucv); 745 else 746 memcpy(iucv->src_name, sa->siucv_name, 8); 747 sk->sk_bound_dev_if = dev->ifindex; 748 iucv->hs_dev = dev; 749 dev_hold(dev); 750 sk->sk_state = IUCV_BOUND; 751 iucv->transport = AF_IUCV_TRANS_HIPER; 752 if (!iucv->msglimit) 753 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT; 754 rcu_read_unlock(); 755 goto done_unlock; 756 } 757 } 758 rcu_read_unlock(); 759 vm_bind: 760 if (pr_iucv) { 761 /* use local userid for backward compat */ 762 memcpy(iucv->src_name, sa->siucv_name, 8); 763 memcpy(iucv->src_user_id, iucv_userid, 8); 764 sk->sk_state = IUCV_BOUND; 765 iucv->transport = AF_IUCV_TRANS_IUCV; 766 if (!iucv->msglimit) 767 iucv->msglimit = IUCV_QUEUELEN_DEFAULT; 768 goto done_unlock; 769 } 770 /* found no dev to bind */ 771 err = -ENODEV; 772 done_unlock: 773 /* Release the socket list lock */ 774 write_unlock_bh(&iucv_sk_list.lock); 775 done: 776 release_sock(sk); 777 return err; 778 } 779 780 /* Automatically bind an unbound socket */ 781 static int iucv_sock_autobind(struct sock *sk) 782 { 783 struct iucv_sock *iucv = iucv_sk(sk); 784 int err = 0; 785 786 if (unlikely(!pr_iucv)) 787 return -EPROTO; 788 789 memcpy(iucv->src_user_id, iucv_userid, 8); 790 791 write_lock_bh(&iucv_sk_list.lock); 792 __iucv_auto_name(iucv); 793 write_unlock_bh(&iucv_sk_list.lock); 794 795 if (!iucv->msglimit) 796 iucv->msglimit = IUCV_QUEUELEN_DEFAULT; 797 798 return err; 799 } 800 801 static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) 802 { 803 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 804 struct sock *sk = sock->sk; 805 struct iucv_sock *iucv = iucv_sk(sk); 806 unsigned char user_data[16]; 807 int err; 808 809 high_nmcpy(user_data, sa->siucv_name); 810 low_nmcpy(user_data, iucv->src_name); 811 ASCEBC(user_data, sizeof(user_data)); 812 813 /* Create path. */ 814 iucv->path = iucv_path_alloc(iucv->msglimit, 815 IUCV_IPRMDATA, GFP_KERNEL); 816 if (!iucv->path) { 817 err = -ENOMEM; 818 goto done; 819 } 820 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, 821 sa->siucv_user_id, NULL, user_data, 822 sk); 823 if (err) { 824 iucv_path_free(iucv->path); 825 iucv->path = NULL; 826 switch (err) { 827 case 0x0b: /* Target communicator is not logged on */ 828 err = -ENETUNREACH; 829 break; 830 case 0x0d: /* Max connections for this guest exceeded */ 831 case 0x0e: /* Max connections for target guest exceeded */ 832 err = -EAGAIN; 833 break; 834 case 0x0f: /* Missing IUCV authorization */ 835 err = -EACCES; 836 break; 837 default: 838 err = -ECONNREFUSED; 839 break; 840 } 841 } 842 done: 843 return err; 844 } 845 846 /* Connect an unconnected socket */ 847 static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, 848 int alen, int flags) 849 { 850 struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; 851 struct sock *sk = sock->sk; 852 struct iucv_sock *iucv = iucv_sk(sk); 853 int err; 854 855 if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) 856 return -EINVAL; 857 858 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) 859 return -EBADFD; 860 861 if (sk->sk_state == IUCV_OPEN && 862 iucv->transport == AF_IUCV_TRANS_HIPER) 863 return -EBADFD; /* explicit bind required */ 864 865 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) 866 return -EINVAL; 867 868 if (sk->sk_state == IUCV_OPEN) { 869 err = iucv_sock_autobind(sk); 870 if (unlikely(err)) 871 return err; 872 } 873 874 lock_sock(sk); 875 876 /* Set the destination information */ 877 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8); 878 memcpy(iucv->dst_name, sa->siucv_name, 8); 879 880 if (iucv->transport == AF_IUCV_TRANS_HIPER) 881 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); 882 else 883 err = afiucv_path_connect(sock, addr); 884 if (err) 885 goto done; 886 887 if (sk->sk_state != IUCV_CONNECTED) 888 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, 889 IUCV_DISCONN), 890 sock_sndtimeo(sk, flags & O_NONBLOCK)); 891 892 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) 893 err = -ECONNREFUSED; 894 895 if (err && iucv->transport == AF_IUCV_TRANS_IUCV) 896 iucv_sever_path(sk, 0); 897 898 done: 899 release_sock(sk); 900 return err; 901 } 902 903 /* Move a socket into listening state. */ 904 static int iucv_sock_listen(struct socket *sock, int backlog) 905 { 906 struct sock *sk = sock->sk; 907 int err; 908 909 lock_sock(sk); 910 911 err = -EINVAL; 912 if (sk->sk_state != IUCV_BOUND) 913 goto done; 914 915 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) 916 goto done; 917 918 sk->sk_max_ack_backlog = backlog; 919 sk->sk_ack_backlog = 0; 920 sk->sk_state = IUCV_LISTEN; 921 err = 0; 922 923 done: 924 release_sock(sk); 925 return err; 926 } 927 928 /* Accept a pending connection */ 929 static int iucv_sock_accept(struct socket *sock, struct socket *newsock, 930 int flags) 931 { 932 DECLARE_WAITQUEUE(wait, current); 933 struct sock *sk = sock->sk, *nsk; 934 long timeo; 935 int err = 0; 936 937 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 938 939 if (sk->sk_state != IUCV_LISTEN) { 940 err = -EBADFD; 941 goto done; 942 } 943 944 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 945 946 /* Wait for an incoming connection */ 947 add_wait_queue_exclusive(sk_sleep(sk), &wait); 948 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { 949 set_current_state(TASK_INTERRUPTIBLE); 950 if (!timeo) { 951 err = -EAGAIN; 952 break; 953 } 954 955 release_sock(sk); 956 timeo = schedule_timeout(timeo); 957 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 958 959 if (sk->sk_state != IUCV_LISTEN) { 960 err = -EBADFD; 961 break; 962 } 963 964 if (signal_pending(current)) { 965 err = sock_intr_errno(timeo); 966 break; 967 } 968 } 969 970 set_current_state(TASK_RUNNING); 971 remove_wait_queue(sk_sleep(sk), &wait); 972 973 if (err) 974 goto done; 975 976 newsock->state = SS_CONNECTED; 977 978 done: 979 release_sock(sk); 980 return err; 981 } 982 983 static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, 984 int *len, int peer) 985 { 986 struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; 987 struct sock *sk = sock->sk; 988 struct iucv_sock *iucv = iucv_sk(sk); 989 990 addr->sa_family = AF_IUCV; 991 *len = sizeof(struct sockaddr_iucv); 992 993 if (peer) { 994 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); 995 memcpy(siucv->siucv_name, iucv->dst_name, 8); 996 } else { 997 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); 998 memcpy(siucv->siucv_name, iucv->src_name, 8); 999 } 1000 memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); 1001 memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); 1002 memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); 1003 1004 return 0; 1005 } 1006 1007 /** 1008 * iucv_send_iprm() - Send socket data in parameter list of an iucv message. 1009 * @path: IUCV path 1010 * @msg: Pointer to a struct iucv_message 1011 * @skb: The socket data to send, skb->len MUST BE <= 7 1012 * 1013 * Send the socket data in the parameter list in the iucv message 1014 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter 1015 * list and the socket data len at index 7 (last byte). 1016 * See also iucv_msg_length(). 1017 * 1018 * Returns the error code from the iucv_message_send() call. 1019 */ 1020 static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, 1021 struct sk_buff *skb) 1022 { 1023 u8 prmdata[8]; 1024 1025 memcpy(prmdata, (void *) skb->data, skb->len); 1026 prmdata[7] = 0xff - (u8) skb->len; 1027 return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, 1028 (void *) prmdata, 8); 1029 } 1030 1031 static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, 1032 size_t len) 1033 { 1034 struct sock *sk = sock->sk; 1035 struct iucv_sock *iucv = iucv_sk(sk); 1036 size_t headroom, linear; 1037 struct sk_buff *skb; 1038 struct iucv_message txmsg = {0}; 1039 struct cmsghdr *cmsg; 1040 int cmsg_done; 1041 long timeo; 1042 char user_id[9]; 1043 char appl_id[9]; 1044 int err; 1045 int noblock = msg->msg_flags & MSG_DONTWAIT; 1046 1047 err = sock_error(sk); 1048 if (err) 1049 return err; 1050 1051 if (msg->msg_flags & MSG_OOB) 1052 return -EOPNOTSUPP; 1053 1054 /* SOCK_SEQPACKET: we do not support segmented records */ 1055 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) 1056 return -EOPNOTSUPP; 1057 1058 lock_sock(sk); 1059 1060 if (sk->sk_shutdown & SEND_SHUTDOWN) { 1061 err = -EPIPE; 1062 goto out; 1063 } 1064 1065 /* Return if the socket is not in connected state */ 1066 if (sk->sk_state != IUCV_CONNECTED) { 1067 err = -ENOTCONN; 1068 goto out; 1069 } 1070 1071 /* initialize defaults */ 1072 cmsg_done = 0; /* check for duplicate headers */ 1073 txmsg.class = 0; 1074 1075 /* iterate over control messages */ 1076 for_each_cmsghdr(cmsg, msg) { 1077 if (!CMSG_OK(msg, cmsg)) { 1078 err = -EINVAL; 1079 goto out; 1080 } 1081 1082 if (cmsg->cmsg_level != SOL_IUCV) 1083 continue; 1084 1085 if (cmsg->cmsg_type & cmsg_done) { 1086 err = -EINVAL; 1087 goto out; 1088 } 1089 cmsg_done |= cmsg->cmsg_type; 1090 1091 switch (cmsg->cmsg_type) { 1092 case SCM_IUCV_TRGCLS: 1093 if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { 1094 err = -EINVAL; 1095 goto out; 1096 } 1097 1098 /* set iucv message target class */ 1099 memcpy(&txmsg.class, 1100 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); 1101 1102 break; 1103 1104 default: 1105 err = -EINVAL; 1106 goto out; 1107 } 1108 } 1109 1110 /* allocate one skb for each iucv message: 1111 * this is fine for SOCK_SEQPACKET (unless we want to support 1112 * segmented records using the MSG_EOR flag), but 1113 * for SOCK_STREAM we might want to improve it in future */ 1114 headroom = (iucv->transport == AF_IUCV_TRANS_HIPER) 1115 ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0; 1116 if (headroom + len < PAGE_SIZE) { 1117 linear = len; 1118 } else { 1119 /* In nonlinear "classic" iucv skb, 1120 * reserve space for iucv_array 1121 */ 1122 if (iucv->transport != AF_IUCV_TRANS_HIPER) 1123 headroom += sizeof(struct iucv_array) * 1124 (MAX_SKB_FRAGS + 1); 1125 linear = PAGE_SIZE - headroom; 1126 } 1127 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear, 1128 noblock, &err, 0); 1129 if (!skb) 1130 goto out; 1131 if (headroom) 1132 skb_reserve(skb, headroom); 1133 skb_put(skb, linear); 1134 skb->len = len; 1135 skb->data_len = len - linear; 1136 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); 1137 if (err) 1138 goto fail; 1139 1140 /* wait if outstanding messages for iucv path has reached */ 1141 timeo = sock_sndtimeo(sk, noblock); 1142 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); 1143 if (err) 1144 goto fail; 1145 1146 /* return -ECONNRESET if the socket is no longer connected */ 1147 if (sk->sk_state != IUCV_CONNECTED) { 1148 err = -ECONNRESET; 1149 goto fail; 1150 } 1151 1152 /* increment and save iucv message tag for msg_completion cbk */ 1153 txmsg.tag = iucv->send_tag++; 1154 IUCV_SKB_CB(skb)->tag = txmsg.tag; 1155 1156 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1157 atomic_inc(&iucv->msg_sent); 1158 err = afiucv_hs_send(&txmsg, sk, skb, 0); 1159 if (err) { 1160 atomic_dec(&iucv->msg_sent); 1161 goto fail; 1162 } 1163 } else { /* Classic VM IUCV transport */ 1164 skb_queue_tail(&iucv->send_skb_q, skb); 1165 1166 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) && 1167 skb->len <= 7) { 1168 err = iucv_send_iprm(iucv->path, &txmsg, skb); 1169 1170 /* on success: there is no message_complete callback */ 1171 /* for an IPRMDATA msg; remove skb from send queue */ 1172 if (err == 0) { 1173 skb_unlink(skb, &iucv->send_skb_q); 1174 kfree_skb(skb); 1175 } 1176 1177 /* this error should never happen since the */ 1178 /* IUCV_IPRMDATA path flag is set... sever path */ 1179 if (err == 0x15) { 1180 pr_iucv->path_sever(iucv->path, NULL); 1181 skb_unlink(skb, &iucv->send_skb_q); 1182 err = -EPIPE; 1183 goto fail; 1184 } 1185 } else if (skb_is_nonlinear(skb)) { 1186 struct iucv_array *iba = (struct iucv_array *)skb->head; 1187 int i; 1188 1189 /* skip iucv_array lying in the headroom */ 1190 iba[0].address = (u32)(addr_t)skb->data; 1191 iba[0].length = (u32)skb_headlen(skb); 1192 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1193 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1194 1195 iba[i + 1].address = 1196 (u32)(addr_t)skb_frag_address(frag); 1197 iba[i + 1].length = (u32)skb_frag_size(frag); 1198 } 1199 err = pr_iucv->message_send(iucv->path, &txmsg, 1200 IUCV_IPBUFLST, 0, 1201 (void *)iba, skb->len); 1202 } else { /* non-IPRM Linear skb */ 1203 err = pr_iucv->message_send(iucv->path, &txmsg, 1204 0, 0, (void *)skb->data, skb->len); 1205 } 1206 if (err) { 1207 if (err == 3) { 1208 user_id[8] = 0; 1209 memcpy(user_id, iucv->dst_user_id, 8); 1210 appl_id[8] = 0; 1211 memcpy(appl_id, iucv->dst_name, 8); 1212 pr_err( 1213 "Application %s on z/VM guest %s exceeds message limit\n", 1214 appl_id, user_id); 1215 err = -EAGAIN; 1216 } else { 1217 err = -EPIPE; 1218 } 1219 skb_unlink(skb, &iucv->send_skb_q); 1220 goto fail; 1221 } 1222 } 1223 1224 release_sock(sk); 1225 return len; 1226 1227 fail: 1228 kfree_skb(skb); 1229 out: 1230 release_sock(sk); 1231 return err; 1232 } 1233 1234 static struct sk_buff *alloc_iucv_recv_skb(unsigned long len) 1235 { 1236 size_t headroom, linear; 1237 struct sk_buff *skb; 1238 int err; 1239 1240 if (len < PAGE_SIZE) { 1241 headroom = 0; 1242 linear = len; 1243 } else { 1244 headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1); 1245 linear = PAGE_SIZE - headroom; 1246 } 1247 skb = alloc_skb_with_frags(headroom + linear, len - linear, 1248 0, &err, GFP_ATOMIC | GFP_DMA); 1249 WARN_ONCE(!skb, 1250 "alloc of recv iucv skb len=%lu failed with errcode=%d\n", 1251 len, err); 1252 if (skb) { 1253 if (headroom) 1254 skb_reserve(skb, headroom); 1255 skb_put(skb, linear); 1256 skb->len = len; 1257 skb->data_len = len - linear; 1258 } 1259 return skb; 1260 } 1261 1262 /* iucv_process_message() - Receive a single outstanding IUCV message 1263 * 1264 * Locking: must be called with message_q.lock held 1265 */ 1266 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, 1267 struct iucv_path *path, 1268 struct iucv_message *msg) 1269 { 1270 int rc; 1271 unsigned int len; 1272 1273 len = iucv_msg_length(msg); 1274 1275 /* store msg target class in the second 4 bytes of skb ctrl buffer */ 1276 /* Note: the first 4 bytes are reserved for msg tag */ 1277 IUCV_SKB_CB(skb)->class = msg->class; 1278 1279 /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ 1280 if ((msg->flags & IUCV_IPRMDATA) && len > 7) { 1281 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) { 1282 skb->data = NULL; 1283 skb->len = 0; 1284 } 1285 } else { 1286 if (skb_is_nonlinear(skb)) { 1287 struct iucv_array *iba = (struct iucv_array *)skb->head; 1288 int i; 1289 1290 iba[0].address = (u32)(addr_t)skb->data; 1291 iba[0].length = (u32)skb_headlen(skb); 1292 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1293 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1294 1295 iba[i + 1].address = 1296 (u32)(addr_t)skb_frag_address(frag); 1297 iba[i + 1].length = (u32)skb_frag_size(frag); 1298 } 1299 rc = pr_iucv->message_receive(path, msg, 1300 IUCV_IPBUFLST, 1301 (void *)iba, len, NULL); 1302 } else { 1303 rc = pr_iucv->message_receive(path, msg, 1304 msg->flags & IUCV_IPRMDATA, 1305 skb->data, len, NULL); 1306 } 1307 if (rc) { 1308 kfree_skb(skb); 1309 return; 1310 } 1311 WARN_ON_ONCE(skb->len != len); 1312 } 1313 1314 IUCV_SKB_CB(skb)->offset = 0; 1315 if (sock_queue_rcv_skb(sk, skb)) 1316 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); 1317 } 1318 1319 /* iucv_process_message_q() - Process outstanding IUCV messages 1320 * 1321 * Locking: must be called with message_q.lock held 1322 */ 1323 static void iucv_process_message_q(struct sock *sk) 1324 { 1325 struct iucv_sock *iucv = iucv_sk(sk); 1326 struct sk_buff *skb; 1327 struct sock_msg_q *p, *n; 1328 1329 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 1330 skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg)); 1331 if (!skb) 1332 break; 1333 iucv_process_message(sk, skb, p->path, &p->msg); 1334 list_del(&p->list); 1335 kfree(p); 1336 if (!skb_queue_empty(&iucv->backlog_skb_q)) 1337 break; 1338 } 1339 } 1340 1341 static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, 1342 size_t len, int flags) 1343 { 1344 int noblock = flags & MSG_DONTWAIT; 1345 struct sock *sk = sock->sk; 1346 struct iucv_sock *iucv = iucv_sk(sk); 1347 unsigned int copied, rlen; 1348 struct sk_buff *skb, *rskb, *cskb; 1349 int err = 0; 1350 u32 offset; 1351 1352 if ((sk->sk_state == IUCV_DISCONN) && 1353 skb_queue_empty(&iucv->backlog_skb_q) && 1354 skb_queue_empty(&sk->sk_receive_queue) && 1355 list_empty(&iucv->message_q.list)) 1356 return 0; 1357 1358 if (flags & (MSG_OOB)) 1359 return -EOPNOTSUPP; 1360 1361 /* receive/dequeue next skb: 1362 * the function understands MSG_PEEK and, thus, does not dequeue skb */ 1363 skb = skb_recv_datagram(sk, flags, noblock, &err); 1364 if (!skb) { 1365 if (sk->sk_shutdown & RCV_SHUTDOWN) 1366 return 0; 1367 return err; 1368 } 1369 1370 offset = IUCV_SKB_CB(skb)->offset; 1371 rlen = skb->len - offset; /* real length of skb */ 1372 copied = min_t(unsigned int, rlen, len); 1373 if (!rlen) 1374 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; 1375 1376 cskb = skb; 1377 if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { 1378 if (!(flags & MSG_PEEK)) 1379 skb_queue_head(&sk->sk_receive_queue, skb); 1380 return -EFAULT; 1381 } 1382 1383 /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ 1384 if (sk->sk_type == SOCK_SEQPACKET) { 1385 if (copied < rlen) 1386 msg->msg_flags |= MSG_TRUNC; 1387 /* each iucv message contains a complete record */ 1388 msg->msg_flags |= MSG_EOR; 1389 } 1390 1391 /* create control message to store iucv msg target class: 1392 * get the trgcls from the control buffer of the skb due to 1393 * fragmentation of original iucv message. */ 1394 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, 1395 sizeof(IUCV_SKB_CB(skb)->class), 1396 (void *)&IUCV_SKB_CB(skb)->class); 1397 if (err) { 1398 if (!(flags & MSG_PEEK)) 1399 skb_queue_head(&sk->sk_receive_queue, skb); 1400 return err; 1401 } 1402 1403 /* Mark read part of skb as used */ 1404 if (!(flags & MSG_PEEK)) { 1405 1406 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 1407 if (sk->sk_type == SOCK_STREAM) { 1408 if (copied < rlen) { 1409 IUCV_SKB_CB(skb)->offset = offset + copied; 1410 skb_queue_head(&sk->sk_receive_queue, skb); 1411 goto done; 1412 } 1413 } 1414 1415 kfree_skb(skb); 1416 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1417 atomic_inc(&iucv->msg_recv); 1418 if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { 1419 WARN_ON(1); 1420 iucv_sock_close(sk); 1421 return -EFAULT; 1422 } 1423 } 1424 1425 /* Queue backlog skbs */ 1426 spin_lock_bh(&iucv->message_q.lock); 1427 rskb = skb_dequeue(&iucv->backlog_skb_q); 1428 while (rskb) { 1429 IUCV_SKB_CB(rskb)->offset = 0; 1430 if (sock_queue_rcv_skb(sk, rskb)) { 1431 skb_queue_head(&iucv->backlog_skb_q, 1432 rskb); 1433 break; 1434 } else { 1435 rskb = skb_dequeue(&iucv->backlog_skb_q); 1436 } 1437 } 1438 if (skb_queue_empty(&iucv->backlog_skb_q)) { 1439 if (!list_empty(&iucv->message_q.list)) 1440 iucv_process_message_q(sk); 1441 if (atomic_read(&iucv->msg_recv) >= 1442 iucv->msglimit / 2) { 1443 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); 1444 if (err) { 1445 sk->sk_state = IUCV_DISCONN; 1446 sk->sk_state_change(sk); 1447 } 1448 } 1449 } 1450 spin_unlock_bh(&iucv->message_q.lock); 1451 } 1452 1453 done: 1454 /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ 1455 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) 1456 copied = rlen; 1457 1458 return copied; 1459 } 1460 1461 static inline unsigned int iucv_accept_poll(struct sock *parent) 1462 { 1463 struct iucv_sock *isk, *n; 1464 struct sock *sk; 1465 1466 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { 1467 sk = (struct sock *) isk; 1468 1469 if (sk->sk_state == IUCV_CONNECTED) 1470 return POLLIN | POLLRDNORM; 1471 } 1472 1473 return 0; 1474 } 1475 1476 unsigned int iucv_sock_poll(struct file *file, struct socket *sock, 1477 poll_table *wait) 1478 { 1479 struct sock *sk = sock->sk; 1480 unsigned int mask = 0; 1481 1482 sock_poll_wait(file, sk_sleep(sk), wait); 1483 1484 if (sk->sk_state == IUCV_LISTEN) 1485 return iucv_accept_poll(sk); 1486 1487 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 1488 mask |= POLLERR | 1489 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 1490 1491 if (sk->sk_shutdown & RCV_SHUTDOWN) 1492 mask |= POLLRDHUP; 1493 1494 if (sk->sk_shutdown == SHUTDOWN_MASK) 1495 mask |= POLLHUP; 1496 1497 if (!skb_queue_empty(&sk->sk_receive_queue) || 1498 (sk->sk_shutdown & RCV_SHUTDOWN)) 1499 mask |= POLLIN | POLLRDNORM; 1500 1501 if (sk->sk_state == IUCV_CLOSED) 1502 mask |= POLLHUP; 1503 1504 if (sk->sk_state == IUCV_DISCONN) 1505 mask |= POLLIN; 1506 1507 if (sock_writeable(sk) && iucv_below_msglim(sk)) 1508 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1509 else 1510 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1511 1512 return mask; 1513 } 1514 1515 static int iucv_sock_shutdown(struct socket *sock, int how) 1516 { 1517 struct sock *sk = sock->sk; 1518 struct iucv_sock *iucv = iucv_sk(sk); 1519 struct iucv_message txmsg; 1520 int err = 0; 1521 1522 how++; 1523 1524 if ((how & ~SHUTDOWN_MASK) || !how) 1525 return -EINVAL; 1526 1527 lock_sock(sk); 1528 switch (sk->sk_state) { 1529 case IUCV_LISTEN: 1530 case IUCV_DISCONN: 1531 case IUCV_CLOSING: 1532 case IUCV_CLOSED: 1533 err = -ENOTCONN; 1534 goto fail; 1535 default: 1536 break; 1537 } 1538 1539 if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { 1540 if (iucv->transport == AF_IUCV_TRANS_IUCV) { 1541 txmsg.class = 0; 1542 txmsg.tag = 0; 1543 err = pr_iucv->message_send(iucv->path, &txmsg, 1544 IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8); 1545 if (err) { 1546 switch (err) { 1547 case 1: 1548 err = -ENOTCONN; 1549 break; 1550 case 2: 1551 err = -ECONNRESET; 1552 break; 1553 default: 1554 err = -ENOTCONN; 1555 break; 1556 } 1557 } 1558 } else 1559 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); 1560 } 1561 1562 sk->sk_shutdown |= how; 1563 if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { 1564 if ((iucv->transport == AF_IUCV_TRANS_IUCV) && 1565 iucv->path) { 1566 err = pr_iucv->path_quiesce(iucv->path, NULL); 1567 if (err) 1568 err = -ENOTCONN; 1569 /* skb_queue_purge(&sk->sk_receive_queue); */ 1570 } 1571 skb_queue_purge(&sk->sk_receive_queue); 1572 } 1573 1574 /* Wake up anyone sleeping in poll */ 1575 sk->sk_state_change(sk); 1576 1577 fail: 1578 release_sock(sk); 1579 return err; 1580 } 1581 1582 static int iucv_sock_release(struct socket *sock) 1583 { 1584 struct sock *sk = sock->sk; 1585 int err = 0; 1586 1587 if (!sk) 1588 return 0; 1589 1590 iucv_sock_close(sk); 1591 1592 sock_orphan(sk); 1593 iucv_sock_kill(sk); 1594 return err; 1595 } 1596 1597 /* getsockopt and setsockopt */ 1598 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, 1599 char __user *optval, unsigned int optlen) 1600 { 1601 struct sock *sk = sock->sk; 1602 struct iucv_sock *iucv = iucv_sk(sk); 1603 int val; 1604 int rc; 1605 1606 if (level != SOL_IUCV) 1607 return -ENOPROTOOPT; 1608 1609 if (optlen < sizeof(int)) 1610 return -EINVAL; 1611 1612 if (get_user(val, (int __user *) optval)) 1613 return -EFAULT; 1614 1615 rc = 0; 1616 1617 lock_sock(sk); 1618 switch (optname) { 1619 case SO_IPRMDATA_MSG: 1620 if (val) 1621 iucv->flags |= IUCV_IPRMDATA; 1622 else 1623 iucv->flags &= ~IUCV_IPRMDATA; 1624 break; 1625 case SO_MSGLIMIT: 1626 switch (sk->sk_state) { 1627 case IUCV_OPEN: 1628 case IUCV_BOUND: 1629 if (val < 1 || val > (u16)(~0)) 1630 rc = -EINVAL; 1631 else 1632 iucv->msglimit = val; 1633 break; 1634 default: 1635 rc = -EINVAL; 1636 break; 1637 } 1638 break; 1639 default: 1640 rc = -ENOPROTOOPT; 1641 break; 1642 } 1643 release_sock(sk); 1644 1645 return rc; 1646 } 1647 1648 static int iucv_sock_getsockopt(struct socket *sock, int level, int optname, 1649 char __user *optval, int __user *optlen) 1650 { 1651 struct sock *sk = sock->sk; 1652 struct iucv_sock *iucv = iucv_sk(sk); 1653 unsigned int val; 1654 int len; 1655 1656 if (level != SOL_IUCV) 1657 return -ENOPROTOOPT; 1658 1659 if (get_user(len, optlen)) 1660 return -EFAULT; 1661 1662 if (len < 0) 1663 return -EINVAL; 1664 1665 len = min_t(unsigned int, len, sizeof(int)); 1666 1667 switch (optname) { 1668 case SO_IPRMDATA_MSG: 1669 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; 1670 break; 1671 case SO_MSGLIMIT: 1672 lock_sock(sk); 1673 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */ 1674 : iucv->msglimit; /* default */ 1675 release_sock(sk); 1676 break; 1677 case SO_MSGSIZE: 1678 if (sk->sk_state == IUCV_OPEN) 1679 return -EBADFD; 1680 val = (iucv->hs_dev) ? iucv->hs_dev->mtu - 1681 sizeof(struct af_iucv_trans_hdr) - ETH_HLEN : 1682 0x7fffffff; 1683 break; 1684 default: 1685 return -ENOPROTOOPT; 1686 } 1687 1688 if (put_user(len, optlen)) 1689 return -EFAULT; 1690 if (copy_to_user(optval, &val, len)) 1691 return -EFAULT; 1692 1693 return 0; 1694 } 1695 1696 1697 /* Callback wrappers - called from iucv base support */ 1698 static int iucv_callback_connreq(struct iucv_path *path, 1699 u8 ipvmid[8], u8 ipuser[16]) 1700 { 1701 unsigned char user_data[16]; 1702 unsigned char nuser_data[16]; 1703 unsigned char src_name[8]; 1704 struct sock *sk, *nsk; 1705 struct iucv_sock *iucv, *niucv; 1706 int err; 1707 1708 memcpy(src_name, ipuser, 8); 1709 EBCASC(src_name, 8); 1710 /* Find out if this path belongs to af_iucv. */ 1711 read_lock(&iucv_sk_list.lock); 1712 iucv = NULL; 1713 sk = NULL; 1714 sk_for_each(sk, &iucv_sk_list.head) 1715 if (sk->sk_state == IUCV_LISTEN && 1716 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { 1717 /* 1718 * Found a listening socket with 1719 * src_name == ipuser[0-7]. 1720 */ 1721 iucv = iucv_sk(sk); 1722 break; 1723 } 1724 read_unlock(&iucv_sk_list.lock); 1725 if (!iucv) 1726 /* No socket found, not one of our paths. */ 1727 return -EINVAL; 1728 1729 bh_lock_sock(sk); 1730 1731 /* Check if parent socket is listening */ 1732 low_nmcpy(user_data, iucv->src_name); 1733 high_nmcpy(user_data, iucv->dst_name); 1734 ASCEBC(user_data, sizeof(user_data)); 1735 if (sk->sk_state != IUCV_LISTEN) { 1736 err = pr_iucv->path_sever(path, user_data); 1737 iucv_path_free(path); 1738 goto fail; 1739 } 1740 1741 /* Check for backlog size */ 1742 if (sk_acceptq_is_full(sk)) { 1743 err = pr_iucv->path_sever(path, user_data); 1744 iucv_path_free(path); 1745 goto fail; 1746 } 1747 1748 /* Create the new socket */ 1749 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); 1750 if (!nsk) { 1751 err = pr_iucv->path_sever(path, user_data); 1752 iucv_path_free(path); 1753 goto fail; 1754 } 1755 1756 niucv = iucv_sk(nsk); 1757 iucv_sock_init(nsk, sk); 1758 1759 /* Set the new iucv_sock */ 1760 memcpy(niucv->dst_name, ipuser + 8, 8); 1761 EBCASC(niucv->dst_name, 8); 1762 memcpy(niucv->dst_user_id, ipvmid, 8); 1763 memcpy(niucv->src_name, iucv->src_name, 8); 1764 memcpy(niucv->src_user_id, iucv->src_user_id, 8); 1765 niucv->path = path; 1766 1767 /* Call iucv_accept */ 1768 high_nmcpy(nuser_data, ipuser + 8); 1769 memcpy(nuser_data + 8, niucv->src_name, 8); 1770 ASCEBC(nuser_data + 8, 8); 1771 1772 /* set message limit for path based on msglimit of accepting socket */ 1773 niucv->msglimit = iucv->msglimit; 1774 path->msglim = iucv->msglimit; 1775 err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); 1776 if (err) { 1777 iucv_sever_path(nsk, 1); 1778 iucv_sock_kill(nsk); 1779 goto fail; 1780 } 1781 1782 iucv_accept_enqueue(sk, nsk); 1783 1784 /* Wake up accept */ 1785 nsk->sk_state = IUCV_CONNECTED; 1786 sk->sk_data_ready(sk); 1787 err = 0; 1788 fail: 1789 bh_unlock_sock(sk); 1790 return 0; 1791 } 1792 1793 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) 1794 { 1795 struct sock *sk = path->private; 1796 1797 sk->sk_state = IUCV_CONNECTED; 1798 sk->sk_state_change(sk); 1799 } 1800 1801 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) 1802 { 1803 struct sock *sk = path->private; 1804 struct iucv_sock *iucv = iucv_sk(sk); 1805 struct sk_buff *skb; 1806 struct sock_msg_q *save_msg; 1807 int len; 1808 1809 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1810 pr_iucv->message_reject(path, msg); 1811 return; 1812 } 1813 1814 spin_lock(&iucv->message_q.lock); 1815 1816 if (!list_empty(&iucv->message_q.list) || 1817 !skb_queue_empty(&iucv->backlog_skb_q)) 1818 goto save_message; 1819 1820 len = atomic_read(&sk->sk_rmem_alloc); 1821 len += SKB_TRUESIZE(iucv_msg_length(msg)); 1822 if (len > sk->sk_rcvbuf) 1823 goto save_message; 1824 1825 skb = alloc_iucv_recv_skb(iucv_msg_length(msg)); 1826 if (!skb) 1827 goto save_message; 1828 1829 iucv_process_message(sk, skb, path, msg); 1830 goto out_unlock; 1831 1832 save_message: 1833 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); 1834 if (!save_msg) 1835 goto out_unlock; 1836 save_msg->path = path; 1837 save_msg->msg = *msg; 1838 1839 list_add_tail(&save_msg->list, &iucv->message_q.list); 1840 1841 out_unlock: 1842 spin_unlock(&iucv->message_q.lock); 1843 } 1844 1845 static void iucv_callback_txdone(struct iucv_path *path, 1846 struct iucv_message *msg) 1847 { 1848 struct sock *sk = path->private; 1849 struct sk_buff *this = NULL; 1850 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; 1851 struct sk_buff *list_skb = list->next; 1852 unsigned long flags; 1853 1854 bh_lock_sock(sk); 1855 if (!skb_queue_empty(list)) { 1856 spin_lock_irqsave(&list->lock, flags); 1857 1858 while (list_skb != (struct sk_buff *)list) { 1859 if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { 1860 this = list_skb; 1861 break; 1862 } 1863 list_skb = list_skb->next; 1864 } 1865 if (this) 1866 __skb_unlink(this, list); 1867 1868 spin_unlock_irqrestore(&list->lock, flags); 1869 1870 if (this) { 1871 kfree_skb(this); 1872 /* wake up any process waiting for sending */ 1873 iucv_sock_wake_msglim(sk); 1874 } 1875 } 1876 1877 if (sk->sk_state == IUCV_CLOSING) { 1878 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { 1879 sk->sk_state = IUCV_CLOSED; 1880 sk->sk_state_change(sk); 1881 } 1882 } 1883 bh_unlock_sock(sk); 1884 1885 } 1886 1887 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) 1888 { 1889 struct sock *sk = path->private; 1890 1891 if (sk->sk_state == IUCV_CLOSED) 1892 return; 1893 1894 bh_lock_sock(sk); 1895 iucv_sever_path(sk, 1); 1896 sk->sk_state = IUCV_DISCONN; 1897 1898 sk->sk_state_change(sk); 1899 bh_unlock_sock(sk); 1900 } 1901 1902 /* called if the other communication side shuts down its RECV direction; 1903 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data. 1904 */ 1905 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) 1906 { 1907 struct sock *sk = path->private; 1908 1909 bh_lock_sock(sk); 1910 if (sk->sk_state != IUCV_CLOSED) { 1911 sk->sk_shutdown |= SEND_SHUTDOWN; 1912 sk->sk_state_change(sk); 1913 } 1914 bh_unlock_sock(sk); 1915 } 1916 1917 /***************** HiperSockets transport callbacks ********************/ 1918 static void afiucv_swap_src_dest(struct sk_buff *skb) 1919 { 1920 struct af_iucv_trans_hdr *trans_hdr = 1921 (struct af_iucv_trans_hdr *)skb->data; 1922 char tmpID[8]; 1923 char tmpName[8]; 1924 1925 ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); 1926 ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); 1927 ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); 1928 ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); 1929 memcpy(tmpID, trans_hdr->srcUserID, 8); 1930 memcpy(tmpName, trans_hdr->srcAppName, 8); 1931 memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); 1932 memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); 1933 memcpy(trans_hdr->destUserID, tmpID, 8); 1934 memcpy(trans_hdr->destAppName, tmpName, 8); 1935 skb_push(skb, ETH_HLEN); 1936 memset(skb->data, 0, ETH_HLEN); 1937 } 1938 1939 /** 1940 * afiucv_hs_callback_syn - react on received SYN 1941 **/ 1942 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) 1943 { 1944 struct sock *nsk; 1945 struct iucv_sock *iucv, *niucv; 1946 struct af_iucv_trans_hdr *trans_hdr; 1947 int err; 1948 1949 iucv = iucv_sk(sk); 1950 trans_hdr = (struct af_iucv_trans_hdr *)skb->data; 1951 if (!iucv) { 1952 /* no sock - connection refused */ 1953 afiucv_swap_src_dest(skb); 1954 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; 1955 err = dev_queue_xmit(skb); 1956 goto out; 1957 } 1958 1959 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); 1960 bh_lock_sock(sk); 1961 if ((sk->sk_state != IUCV_LISTEN) || 1962 sk_acceptq_is_full(sk) || 1963 !nsk) { 1964 /* error on server socket - connection refused */ 1965 afiucv_swap_src_dest(skb); 1966 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; 1967 err = dev_queue_xmit(skb); 1968 iucv_sock_kill(nsk); 1969 bh_unlock_sock(sk); 1970 goto out; 1971 } 1972 1973 niucv = iucv_sk(nsk); 1974 iucv_sock_init(nsk, sk); 1975 niucv->transport = AF_IUCV_TRANS_HIPER; 1976 niucv->msglimit = iucv->msglimit; 1977 if (!trans_hdr->window) 1978 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT; 1979 else 1980 niucv->msglimit_peer = trans_hdr->window; 1981 memcpy(niucv->dst_name, trans_hdr->srcAppName, 8); 1982 memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8); 1983 memcpy(niucv->src_name, iucv->src_name, 8); 1984 memcpy(niucv->src_user_id, iucv->src_user_id, 8); 1985 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; 1986 niucv->hs_dev = iucv->hs_dev; 1987 dev_hold(niucv->hs_dev); 1988 afiucv_swap_src_dest(skb); 1989 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; 1990 trans_hdr->window = niucv->msglimit; 1991 /* if receiver acks the xmit connection is established */ 1992 err = dev_queue_xmit(skb); 1993 if (!err) { 1994 iucv_accept_enqueue(sk, nsk); 1995 nsk->sk_state = IUCV_CONNECTED; 1996 sk->sk_data_ready(sk); 1997 } else 1998 iucv_sock_kill(nsk); 1999 bh_unlock_sock(sk); 2000 2001 out: 2002 return NET_RX_SUCCESS; 2003 } 2004 2005 /** 2006 * afiucv_hs_callback_synack() - react on received SYN-ACK 2007 **/ 2008 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) 2009 { 2010 struct iucv_sock *iucv = iucv_sk(sk); 2011 struct af_iucv_trans_hdr *trans_hdr = 2012 (struct af_iucv_trans_hdr *)skb->data; 2013 2014 if (!iucv) 2015 goto out; 2016 if (sk->sk_state != IUCV_BOUND) 2017 goto out; 2018 bh_lock_sock(sk); 2019 iucv->msglimit_peer = trans_hdr->window; 2020 sk->sk_state = IUCV_CONNECTED; 2021 sk->sk_state_change(sk); 2022 bh_unlock_sock(sk); 2023 out: 2024 kfree_skb(skb); 2025 return NET_RX_SUCCESS; 2026 } 2027 2028 /** 2029 * afiucv_hs_callback_synfin() - react on received SYN_FIN 2030 **/ 2031 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) 2032 { 2033 struct iucv_sock *iucv = iucv_sk(sk); 2034 2035 if (!iucv) 2036 goto out; 2037 if (sk->sk_state != IUCV_BOUND) 2038 goto out; 2039 bh_lock_sock(sk); 2040 sk->sk_state = IUCV_DISCONN; 2041 sk->sk_state_change(sk); 2042 bh_unlock_sock(sk); 2043 out: 2044 kfree_skb(skb); 2045 return NET_RX_SUCCESS; 2046 } 2047 2048 /** 2049 * afiucv_hs_callback_fin() - react on received FIN 2050 **/ 2051 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) 2052 { 2053 struct iucv_sock *iucv = iucv_sk(sk); 2054 2055 /* other end of connection closed */ 2056 if (!iucv) 2057 goto out; 2058 bh_lock_sock(sk); 2059 if (sk->sk_state == IUCV_CONNECTED) { 2060 sk->sk_state = IUCV_DISCONN; 2061 sk->sk_state_change(sk); 2062 } 2063 bh_unlock_sock(sk); 2064 out: 2065 kfree_skb(skb); 2066 return NET_RX_SUCCESS; 2067 } 2068 2069 /** 2070 * afiucv_hs_callback_win() - react on received WIN 2071 **/ 2072 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) 2073 { 2074 struct iucv_sock *iucv = iucv_sk(sk); 2075 struct af_iucv_trans_hdr *trans_hdr = 2076 (struct af_iucv_trans_hdr *)skb->data; 2077 2078 if (!iucv) 2079 return NET_RX_SUCCESS; 2080 2081 if (sk->sk_state != IUCV_CONNECTED) 2082 return NET_RX_SUCCESS; 2083 2084 atomic_sub(trans_hdr->window, &iucv->msg_sent); 2085 iucv_sock_wake_msglim(sk); 2086 return NET_RX_SUCCESS; 2087 } 2088 2089 /** 2090 * afiucv_hs_callback_rx() - react on received data 2091 **/ 2092 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) 2093 { 2094 struct iucv_sock *iucv = iucv_sk(sk); 2095 2096 if (!iucv) { 2097 kfree_skb(skb); 2098 return NET_RX_SUCCESS; 2099 } 2100 2101 if (sk->sk_state != IUCV_CONNECTED) { 2102 kfree_skb(skb); 2103 return NET_RX_SUCCESS; 2104 } 2105 2106 if (sk->sk_shutdown & RCV_SHUTDOWN) { 2107 kfree_skb(skb); 2108 return NET_RX_SUCCESS; 2109 } 2110 2111 /* write stuff from iucv_msg to skb cb */ 2112 skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); 2113 skb_reset_transport_header(skb); 2114 skb_reset_network_header(skb); 2115 IUCV_SKB_CB(skb)->offset = 0; 2116 spin_lock(&iucv->message_q.lock); 2117 if (skb_queue_empty(&iucv->backlog_skb_q)) { 2118 if (sock_queue_rcv_skb(sk, skb)) { 2119 /* handle rcv queue full */ 2120 skb_queue_tail(&iucv->backlog_skb_q, skb); 2121 } 2122 } else 2123 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); 2124 spin_unlock(&iucv->message_q.lock); 2125 return NET_RX_SUCCESS; 2126 } 2127 2128 /** 2129 * afiucv_hs_rcv() - base function for arriving data through HiperSockets 2130 * transport 2131 * called from netif RX softirq 2132 **/ 2133 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, 2134 struct packet_type *pt, struct net_device *orig_dev) 2135 { 2136 struct sock *sk; 2137 struct iucv_sock *iucv; 2138 struct af_iucv_trans_hdr *trans_hdr; 2139 char nullstring[8]; 2140 int err = 0; 2141 2142 if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { 2143 WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", 2144 (int)skb->len, 2145 (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr))); 2146 kfree_skb(skb); 2147 return NET_RX_SUCCESS; 2148 } 2149 if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) 2150 if (skb_linearize(skb)) { 2151 WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d", 2152 (int)skb->len); 2153 kfree_skb(skb); 2154 return NET_RX_SUCCESS; 2155 } 2156 skb_pull(skb, ETH_HLEN); 2157 trans_hdr = (struct af_iucv_trans_hdr *)skb->data; 2158 EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); 2159 EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); 2160 EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); 2161 EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); 2162 memset(nullstring, 0, sizeof(nullstring)); 2163 iucv = NULL; 2164 sk = NULL; 2165 read_lock(&iucv_sk_list.lock); 2166 sk_for_each(sk, &iucv_sk_list.head) { 2167 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { 2168 if ((!memcmp(&iucv_sk(sk)->src_name, 2169 trans_hdr->destAppName, 8)) && 2170 (!memcmp(&iucv_sk(sk)->src_user_id, 2171 trans_hdr->destUserID, 8)) && 2172 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && 2173 (!memcmp(&iucv_sk(sk)->dst_user_id, 2174 nullstring, 8))) { 2175 iucv = iucv_sk(sk); 2176 break; 2177 } 2178 } else { 2179 if ((!memcmp(&iucv_sk(sk)->src_name, 2180 trans_hdr->destAppName, 8)) && 2181 (!memcmp(&iucv_sk(sk)->src_user_id, 2182 trans_hdr->destUserID, 8)) && 2183 (!memcmp(&iucv_sk(sk)->dst_name, 2184 trans_hdr->srcAppName, 8)) && 2185 (!memcmp(&iucv_sk(sk)->dst_user_id, 2186 trans_hdr->srcUserID, 8))) { 2187 iucv = iucv_sk(sk); 2188 break; 2189 } 2190 } 2191 } 2192 read_unlock(&iucv_sk_list.lock); 2193 if (!iucv) 2194 sk = NULL; 2195 2196 /* no sock 2197 how should we send with no sock 2198 1) send without sock no send rc checking? 2199 2) introduce default sock to handle this cases 2200 2201 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case 2202 data -> send FIN 2203 SYN|ACK, SYN|FIN, FIN -> no action? */ 2204 2205 switch (trans_hdr->flags) { 2206 case AF_IUCV_FLAG_SYN: 2207 /* connect request */ 2208 err = afiucv_hs_callback_syn(sk, skb); 2209 break; 2210 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK): 2211 /* connect request confirmed */ 2212 err = afiucv_hs_callback_synack(sk, skb); 2213 break; 2214 case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN): 2215 /* connect request refused */ 2216 err = afiucv_hs_callback_synfin(sk, skb); 2217 break; 2218 case (AF_IUCV_FLAG_FIN): 2219 /* close request */ 2220 err = afiucv_hs_callback_fin(sk, skb); 2221 break; 2222 case (AF_IUCV_FLAG_WIN): 2223 err = afiucv_hs_callback_win(sk, skb); 2224 if (skb->len == sizeof(struct af_iucv_trans_hdr)) { 2225 kfree_skb(skb); 2226 break; 2227 } 2228 /* fall through and receive non-zero length data */ 2229 case (AF_IUCV_FLAG_SHT): 2230 /* shutdown request */ 2231 /* fall through and receive zero length data */ 2232 case 0: 2233 /* plain data frame */ 2234 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; 2235 err = afiucv_hs_callback_rx(sk, skb); 2236 break; 2237 default: 2238 ; 2239 } 2240 2241 return err; 2242 } 2243 2244 /** 2245 * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets 2246 * transport 2247 **/ 2248 static void afiucv_hs_callback_txnotify(struct sk_buff *skb, 2249 enum iucv_tx_notify n) 2250 { 2251 struct sock *isk = skb->sk; 2252 struct sock *sk = NULL; 2253 struct iucv_sock *iucv = NULL; 2254 struct sk_buff_head *list; 2255 struct sk_buff *list_skb; 2256 struct sk_buff *nskb; 2257 unsigned long flags; 2258 2259 read_lock_irqsave(&iucv_sk_list.lock, flags); 2260 sk_for_each(sk, &iucv_sk_list.head) 2261 if (sk == isk) { 2262 iucv = iucv_sk(sk); 2263 break; 2264 } 2265 read_unlock_irqrestore(&iucv_sk_list.lock, flags); 2266 2267 if (!iucv || sock_flag(sk, SOCK_ZAPPED)) 2268 return; 2269 2270 list = &iucv->send_skb_q; 2271 spin_lock_irqsave(&list->lock, flags); 2272 if (skb_queue_empty(list)) 2273 goto out_unlock; 2274 list_skb = list->next; 2275 nskb = list_skb->next; 2276 while (list_skb != (struct sk_buff *)list) { 2277 if (skb_shinfo(list_skb) == skb_shinfo(skb)) { 2278 switch (n) { 2279 case TX_NOTIFY_OK: 2280 __skb_unlink(list_skb, list); 2281 kfree_skb(list_skb); 2282 iucv_sock_wake_msglim(sk); 2283 break; 2284 case TX_NOTIFY_PENDING: 2285 atomic_inc(&iucv->pendings); 2286 break; 2287 case TX_NOTIFY_DELAYED_OK: 2288 __skb_unlink(list_skb, list); 2289 atomic_dec(&iucv->pendings); 2290 if (atomic_read(&iucv->pendings) <= 0) 2291 iucv_sock_wake_msglim(sk); 2292 kfree_skb(list_skb); 2293 break; 2294 case TX_NOTIFY_UNREACHABLE: 2295 case TX_NOTIFY_DELAYED_UNREACHABLE: 2296 case TX_NOTIFY_TPQFULL: /* not yet used */ 2297 case TX_NOTIFY_GENERALERROR: 2298 case TX_NOTIFY_DELAYED_GENERALERROR: 2299 __skb_unlink(list_skb, list); 2300 kfree_skb(list_skb); 2301 if (sk->sk_state == IUCV_CONNECTED) { 2302 sk->sk_state = IUCV_DISCONN; 2303 sk->sk_state_change(sk); 2304 } 2305 break; 2306 } 2307 break; 2308 } 2309 list_skb = nskb; 2310 nskb = nskb->next; 2311 } 2312 out_unlock: 2313 spin_unlock_irqrestore(&list->lock, flags); 2314 2315 if (sk->sk_state == IUCV_CLOSING) { 2316 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { 2317 sk->sk_state = IUCV_CLOSED; 2318 sk->sk_state_change(sk); 2319 } 2320 } 2321 2322 } 2323 2324 /* 2325 * afiucv_netdev_event: handle netdev notifier chain events 2326 */ 2327 static int afiucv_netdev_event(struct notifier_block *this, 2328 unsigned long event, void *ptr) 2329 { 2330 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 2331 struct sock *sk; 2332 struct iucv_sock *iucv; 2333 2334 switch (event) { 2335 case NETDEV_REBOOT: 2336 case NETDEV_GOING_DOWN: 2337 sk_for_each(sk, &iucv_sk_list.head) { 2338 iucv = iucv_sk(sk); 2339 if ((iucv->hs_dev == event_dev) && 2340 (sk->sk_state == IUCV_CONNECTED)) { 2341 if (event == NETDEV_GOING_DOWN) 2342 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); 2343 sk->sk_state = IUCV_DISCONN; 2344 sk->sk_state_change(sk); 2345 } 2346 } 2347 break; 2348 case NETDEV_DOWN: 2349 case NETDEV_UNREGISTER: 2350 default: 2351 break; 2352 } 2353 return NOTIFY_DONE; 2354 } 2355 2356 static struct notifier_block afiucv_netdev_notifier = { 2357 .notifier_call = afiucv_netdev_event, 2358 }; 2359 2360 static const struct proto_ops iucv_sock_ops = { 2361 .family = PF_IUCV, 2362 .owner = THIS_MODULE, 2363 .release = iucv_sock_release, 2364 .bind = iucv_sock_bind, 2365 .connect = iucv_sock_connect, 2366 .listen = iucv_sock_listen, 2367 .accept = iucv_sock_accept, 2368 .getname = iucv_sock_getname, 2369 .sendmsg = iucv_sock_sendmsg, 2370 .recvmsg = iucv_sock_recvmsg, 2371 .poll = iucv_sock_poll, 2372 .ioctl = sock_no_ioctl, 2373 .mmap = sock_no_mmap, 2374 .socketpair = sock_no_socketpair, 2375 .shutdown = iucv_sock_shutdown, 2376 .setsockopt = iucv_sock_setsockopt, 2377 .getsockopt = iucv_sock_getsockopt, 2378 }; 2379 2380 static const struct net_proto_family iucv_sock_family_ops = { 2381 .family = AF_IUCV, 2382 .owner = THIS_MODULE, 2383 .create = iucv_sock_create, 2384 }; 2385 2386 static struct packet_type iucv_packet_type = { 2387 .type = cpu_to_be16(ETH_P_AF_IUCV), 2388 .func = afiucv_hs_rcv, 2389 }; 2390 2391 static int afiucv_iucv_init(void) 2392 { 2393 int err; 2394 2395 err = pr_iucv->iucv_register(&af_iucv_handler, 0); 2396 if (err) 2397 goto out; 2398 /* establish dummy device */ 2399 af_iucv_driver.bus = pr_iucv->bus; 2400 err = driver_register(&af_iucv_driver); 2401 if (err) 2402 goto out_iucv; 2403 af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); 2404 if (!af_iucv_dev) { 2405 err = -ENOMEM; 2406 goto out_driver; 2407 } 2408 dev_set_name(af_iucv_dev, "af_iucv"); 2409 af_iucv_dev->bus = pr_iucv->bus; 2410 af_iucv_dev->parent = pr_iucv->root; 2411 af_iucv_dev->release = (void (*)(struct device *))kfree; 2412 af_iucv_dev->driver = &af_iucv_driver; 2413 err = device_register(af_iucv_dev); 2414 if (err) 2415 goto out_driver; 2416 return 0; 2417 2418 out_driver: 2419 driver_unregister(&af_iucv_driver); 2420 out_iucv: 2421 pr_iucv->iucv_unregister(&af_iucv_handler, 0); 2422 out: 2423 return err; 2424 } 2425 2426 static int __init afiucv_init(void) 2427 { 2428 int err; 2429 2430 if (MACHINE_IS_VM) { 2431 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); 2432 if (unlikely(err)) { 2433 WARN_ON(err); 2434 err = -EPROTONOSUPPORT; 2435 goto out; 2436 } 2437 2438 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv"); 2439 if (!pr_iucv) { 2440 printk(KERN_WARNING "iucv_if lookup failed\n"); 2441 memset(&iucv_userid, 0, sizeof(iucv_userid)); 2442 } 2443 } else { 2444 memset(&iucv_userid, 0, sizeof(iucv_userid)); 2445 pr_iucv = NULL; 2446 } 2447 2448 err = proto_register(&iucv_proto, 0); 2449 if (err) 2450 goto out; 2451 err = sock_register(&iucv_sock_family_ops); 2452 if (err) 2453 goto out_proto; 2454 2455 if (pr_iucv) { 2456 err = afiucv_iucv_init(); 2457 if (err) 2458 goto out_sock; 2459 } else 2460 register_netdevice_notifier(&afiucv_netdev_notifier); 2461 dev_add_pack(&iucv_packet_type); 2462 return 0; 2463 2464 out_sock: 2465 sock_unregister(PF_IUCV); 2466 out_proto: 2467 proto_unregister(&iucv_proto); 2468 out: 2469 if (pr_iucv) 2470 symbol_put(iucv_if); 2471 return err; 2472 } 2473 2474 static void __exit afiucv_exit(void) 2475 { 2476 if (pr_iucv) { 2477 device_unregister(af_iucv_dev); 2478 driver_unregister(&af_iucv_driver); 2479 pr_iucv->iucv_unregister(&af_iucv_handler, 0); 2480 symbol_put(iucv_if); 2481 } else 2482 unregister_netdevice_notifier(&afiucv_netdev_notifier); 2483 dev_remove_pack(&iucv_packet_type); 2484 sock_unregister(PF_IUCV); 2485 proto_unregister(&iucv_proto); 2486 } 2487 2488 module_init(afiucv_init); 2489 module_exit(afiucv_exit); 2490 2491 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>"); 2492 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); 2493 MODULE_VERSION(VERSION); 2494 MODULE_LICENSE("GPL"); 2495 MODULE_ALIAS_NETPROTO(PF_IUCV); 2496 2497