/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (call->tx_top - call->tx_hard_ack <
		    min_t(unsigned int, call->tx_winsize,
			  call->cong_cwnd + call->cong_extra))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		mutex_unlock(&call->user_mutex);
		*timeo = schedule_timeout(*timeo);
		if (mutex_lock_interruptible(&call->user_mutex) < 0)
			return sock_intr_errno(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt2, timeout;
	u64 rtt;

	rtt = READ_ONCE(call->peer->rtt);
	rtt2 = nsecs_to_jiffies64(rtt) * 2;
	if (rtt2 < 1)
		rtt2 = 1;

	timeout = rtt2;
	tx_start = READ_ONCE(call->tx_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		tx_win = READ_ONCE(call->tx_hard_ack);
		if (call->tx_top - tx_win <
		    min_t(unsigned int, call->tx_winsize,
			  call->cong_cwnd + call->cong_extra))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
		    timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			timeout = rtt2;
			tx_start = tx_win;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		timeout = schedule_timeout(timeout);
	}
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	if (waitall)
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call);
	else
		ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] =
			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
			RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}

/*
 * Queue a DATA packet for transmission, set the resend timeout and send
 * the packet immediately. Returns the error from rxrpc_send_data_packet()
 * in case the caller wants to do something with it.
 */
static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			      struct sk_buff *skb, bool last,
			      rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long now;
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last)
		annotation |= RXRPC_TX_ANNO_LAST;

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_tx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			now = jiffies;
			WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
			if (!last)
				break;
			/* Fall through */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		switch (ret) {
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			rxrpc_set_call_completion(call,
						  RXRPC_CALL_LOCAL_ERROR,
						  0, ret);
			goto out;
		}
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		unsigned long now = jiffies, resend_at;

		if (call->peer->rtt_usage > 1)
			resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
		else
			resend_at = rxrpc_resend_timeout;
		if (resend_at < 1)
			resend_at = 1;

		resend_at += now;
		WRITE_ONCE(call->resend_at, resend_at);
		rxrpc_reduce_call_timer(call, resend_at, now,
					rxrpc_timer_set_for_send);
	}

out:
	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave(" = %d", ret);
	return ret;
}
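
/*
 * Editor's note (illustrative, not part of the original file): the resend
 * timer armed above is 1.5 x RTT once more than one RTT sample has been
 * gathered, with rxrpc_resend_timeout as the fallback. As a worked example,
 * a measured RTT of 20ms (call->peer->rtt == 20,000,000 ns) gives
 * nsecs_to_jiffies(30,000,000), which on a HZ=1000 kernel is about 30
 * jiffies - so the packet becomes eligible for retransmission roughly 30ms
 * after it was queued if it has not been hard-acked by then.
 */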

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	if (call->tx_total_len != -1) {
		if (len > call->tx_total_len)
			return -EMSGSIZE;
		if (!more && len != call->tx_total_len)
			return -EMSGSIZE;
	}

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_tx_seen);

	copied = 0;
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false, NULL);

		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			if (call->tx_top - call->tx_hard_ack >=
			    min_t(unsigned int, call->tx_winsize,
				  call->cong_cwnd + call->cong_extra)) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo,
							       msg->msg_flags & MSG_WAITALL);
				if (ret < 0)
					goto maybe_error;
			}

			max = RXRPC_JUMBO_DATALEN;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->security_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb, rxrpc_skb_tx_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->security_size);
			skb_reserve(skb, call->conn->security_size);
			skb->len += call->conn->security_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state == RXRPC_CALL_COMPLETE)
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					skb_put_zero(skb, pad);
			}

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = conn->security->secure_packet(
				call, skb, skb->mark, skb->head);
			if (ret < 0)
				goto out;

			ret = rxrpc_queue_packet(rx, call, skb,
						 !msg_data_left(msg) && !more,
						 notify_end_tx);
			/* Should check for failure here */
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
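
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * one way a userspace sender might build the control buffer that
 * rxrpc_sendmsg_cmsg() above parses. Every sendmsg() on an rxrpc socket
 * carries an RXRPC_USER_CALL_ID cmsg holding the caller's unsigned long tag
 * for the call; RXRPC_TX_LENGTH optionally pre-declares the total amount of
 * request data. The socket is assumed to be already connected to the target
 * service, and fd, iov and request_len are names invented for the example.
 *
 *	unsigned long call_id = 1;
 *	__s64 tx_len = request_len;
 *	unsigned char control[CMSG_SPACE(sizeof(call_id)) +
 *			      CMSG_SPACE(sizeof(tx_len))];
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= control,
 *		.msg_controllen	= sizeof(control),
 *	};
 *	struct cmsghdr *cmsg;
 *	ssize_t ret;
 *
 *	memset(control, 0, sizeof(control));
 *
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_TX_LENGTH;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(tx_len));
 *	memcpy(CMSG_DATA(cmsg), &tx_len, sizeof(tx_len));
 *
 *	ret = sendmsg(fd, &msg, 0);
 */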

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
				     atomic_inc_return(&rxrpc_debug_id));
	/* The socket is now unlocked */

	rxrpc_put_peer(cp.peer);
	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
	__releases(&call->user_mutex)
{
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
	unsigned long now, j;
	int ret;

	struct rxrpc_send_params p = {
		.call.tx_total_len	= -1,
		.call.user_call_ID	= 0,
		.call.nr_timeouts	= 0,
		.call.intr		= true,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
		.upgrade		= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
		/* The socket is now unlocked. */
		if (IS_ERR(call))
			return PTR_ERR(call);
		ret = 0;
		goto out_put_unlock;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
		case RXRPC_CALL_SERVER_ACCEPTING:
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.call.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto error_put;
			call->tx_total_len = p.call.tx_total_len;
		}
	}

	switch (p.call.nr_timeouts) {
	case 3:
		j = msecs_to_jiffies(p.call.timeouts.normal);
		if (p.call.timeouts.normal > 0 && j == 0)
			j = 1;
		WRITE_ONCE(call->next_rx_timo, j);
		/* Fall through */
	case 2:
		j = msecs_to_jiffies(p.call.timeouts.idle);
		if (p.call.timeouts.idle > 0 && j == 0)
			j = 1;
		WRITE_ONCE(call->next_req_timo, j);
		/* Fall through */
	case 1:
		if (p.call.timeouts.hard > 0) {
			j = msecs_to_jiffies(p.call.timeouts.hard);
			now = jiffies;
			j += now;
			WRITE_ONCE(call->expect_term_by, j);
			rxrpc_reduce_call_timer(call, j, now,
						rxrpc_timer_set_for_hard);
		}
		break;
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (rxrpc_is_client_call(call) &&
		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else if (rxrpc_is_service_call(call) &&
		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Reply phase not begun or not complete for service call. */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL);
	}

out_put_unlock:
	mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call. The call must be in a state
 * appropriate to sending data. No control data should be supplied in @msg,
 * nor should an address be supplied. MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
764 */ 765 int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, 766 struct msghdr *msg, size_t len, 767 rxrpc_notify_end_tx_t notify_end_tx) 768 { 769 int ret; 770 771 _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); 772 773 ASSERTCMP(msg->msg_name, ==, NULL); 774 ASSERTCMP(msg->msg_control, ==, NULL); 775 776 mutex_lock(&call->user_mutex); 777 778 _debug("CALL %d USR %lx ST %d on CONN %p", 779 call->debug_id, call->user_call_ID, call->state, call->conn); 780 781 switch (READ_ONCE(call->state)) { 782 case RXRPC_CALL_CLIENT_SEND_REQUEST: 783 case RXRPC_CALL_SERVER_ACK_REQUEST: 784 case RXRPC_CALL_SERVER_SEND_REPLY: 785 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len, 786 notify_end_tx); 787 break; 788 case RXRPC_CALL_COMPLETE: 789 read_lock_bh(&call->state_lock); 790 ret = call->error; 791 read_unlock_bh(&call->state_lock); 792 break; 793 default: 794 /* Request phase complete for this client call */ 795 trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send")); 796 ret = -EPROTO; 797 break; 798 } 799 800 mutex_unlock(&call->user_mutex); 801 _leave(" = %d", ret); 802 return ret; 803 } 804 EXPORT_SYMBOL(rxrpc_kernel_send_data); 805 806 /** 807 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call 808 * @sock: The socket the call is on 809 * @call: The call to be aborted 810 * @abort_code: The abort code to stick into the ABORT packet 811 * @error: Local error value 812 * @why: 3-char string indicating why. 813 * 814 * Allow a kernel service to abort a call, if it's still in an abortable state 815 * and return true if the call was aborted, false if it was already complete. 816 */ 817 bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, 818 u32 abort_code, int error, const char *why) 819 { 820 bool aborted; 821 822 _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why); 823 824 mutex_lock(&call->user_mutex); 825 826 aborted = rxrpc_abort_call(why, call, 0, abort_code, error); 827 if (aborted) 828 rxrpc_send_abort_packet(call); 829 830 mutex_unlock(&call->user_mutex); 831 return aborted; 832 } 833 EXPORT_SYMBOL(rxrpc_kernel_abort_call); 834 835 /** 836 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call 837 * @sock: The socket the call is on 838 * @call: The call to be informed 839 * @tx_total_len: The amount of data to be transmitted for this call 840 * 841 * Allow a kernel service to set the total transmit length on a call. This 842 * allows buffer-to-packet encrypt-and-copy to be performed. 843 * 844 * This function is primarily for use for setting the reply length since the 845 * request length can be set when beginning the call. 846 */ 847 void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call, 848 s64 tx_total_len) 849 { 850 WARN_ON(call->tx_total_len != -1); 851 call->tx_total_len = tx_total_len; 852 } 853 EXPORT_SYMBOL(rxrpc_kernel_set_tx_length); 854