// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Return true if there's sufficient Tx queue space.
 */
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
	unsigned int win_size =
		min_t(unsigned int, call->tx_winsize,
		      call->cong_cwnd + call->cong_extra);
	rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);

	if (_tx_win)
		*_tx_win = tx_win;
	return call->tx_top - tx_win < win_size;
}

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		mutex_unlock(&call->user_mutex);
		*timeo = schedule_timeout(*timeo);
		if (mutex_lock_interruptible(&call->user_mutex) < 0)
			return sock_intr_errno(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	tx_start = READ_ONCE(call->tx_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		tx_win = READ_ONCE(call->tx_hard_ack);
		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		timeout = schedule_timeout(timeout);
	}
}
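
/*
 * Worked example with hypothetical numbers (not from the original source):
 * the window check in rxrpc_check_tx_space() works on free-running sequence
 * numbers.  With, say, tx_winsize = 64 and cong_cwnd + cong_extra = 10,
 * win_size is 10; if tx_hard_ack = 100 and tx_top = 109, then
 * tx_top - tx_win = 9 < 10, so one more DATA packet may be queued before the
 * sender must wait for the hard-ACK point to advance.
 */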

/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] =
			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
			RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}
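
/*
 * A minimal sketch of an end-of-Tx notification handler, assuming a kernel
 * service that passed one to rxrpc_kernel_send_data().  It has the shape of
 * rxrpc_notify_end_tx_t; the names below are hypothetical:
 *
 *	static void my_notify_end_tx(struct sock *sk, struct rxrpc_call *call,
 *				     unsigned long user_call_ID)
 *	{
 *		// The Tx phase is over: per-call send state keyed by
 *		// user_call_ID may now be released.
 *	}
 */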

/*
 * Queue a DATA packet for transmission, set the resend timeout and send
 * the packet immediately.  Returns the error from rxrpc_send_data_packet()
 * in case the caller wants to do something with it.
 */
static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			      struct sk_buff *skb, bool last,
			      rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long now;
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last)
		annotation |= RXRPC_TX_ANNO_LAST;

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			now = jiffies;
			WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
			if (!last)
				break;
			/* Fall through */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		switch (ret) {
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  0, ret);
			goto out;
		}
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		unsigned long now = jiffies;
		unsigned long resend_at = now + call->peer->rto_j;

		WRITE_ONCE(call->resend_at, resend_at);
		rxrpc_reduce_call_timer(call, resend_at, now,
					rxrpc_timer_set_for_send);
	}

out:
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	_leave(" = %d", ret);
	return ret;
}
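
/*
 * For reference, a sketch (derived from the switch above, not a normative
 * diagram) of the state transitions rxrpc_queue_packet() drives:
 *
 *	client, last packet queued:
 *		CLIENT_SEND_REQUEST -> CLIENT_AWAIT_REPLY	(notify_end_tx)
 *	server, first reply packet queued:
 *		SERVER_ACK_REQUEST  -> SERVER_SEND_REPLY
 *	server, last reply packet queued:
 *		SERVER_SEND_REPLY   -> SERVER_AWAIT_ACK		(notify_end_tx)
 */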

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	if (call->tx_total_len != -1) {
		if (len > call->tx_total_len)
			return -EMSGSIZE;
		if (!more && len != call->tx_total_len)
			return -EMSGSIZE;
	}

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_seen);

	copied = 0;
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false, NULL);

		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL)) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo,
							       msg->msg_flags & MSG_WAITALL);
				if (ret < 0)
					goto maybe_error;
			}

			max = RXRPC_JUMBO_DATALEN;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->security_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			sp = rxrpc_skb(skb);
			sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
			rxrpc_new_skb(skb, rxrpc_skb_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->security_size);
			skb_reserve(skb, call->conn->security_size);
			skb->len += call->conn->security_size;

			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
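
		/* Worked example with hypothetical parameters (not from the
		 * original source): assuming RXRPC_JUMBO_DATALEN = 1412,
		 * security_size = 8 and size_align = 8, the sizing above gives
		 * max = (1412 - 8) & ~7 = 1400.  A final 100-byte chunk gives
		 * space = (100 + 8) & ~7 = 104 and size = 112, leaving room
		 * for security padding at the end of the buffer.
		 */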

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state == RXRPC_CALL_COMPLETE)
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					skb_put_zero(skb, pad);
			}

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = call->security->secure_packet(
				call, skb, skb->mark, skb->head);
			if (ret < 0)
				goto out;

			ret = rxrpc_queue_packet(rx, call, skb,
						 !msg_data_left(msg) && !more,
						 notify_end_tx);
			/* Should check for failure here */
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
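
/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * control messages parsed above are built with the standard CMSG macros.
 * For example, tagging a call and pre-declaring its total Tx length:
 *
 *	unsigned long call_id = 1;	// hypothetical user call ID
 *	__s64 tx_len = 256;		// hypothetical total request size
 *	char ctrl[CMSG_SPACE(sizeof(call_id)) + CMSG_SPACE(sizeof(tx_len))];
 *	struct msghdr msg = {
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level	= SOL_RXRPC;
 *	cm->cmsg_type	= RXRPC_USER_CALL_ID;
 *	cm->cmsg_len	= CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cm), &call_id, sizeof(call_id));
 *
 *	cm = CMSG_NXTHDR(&msg, cm);
 *	cm->cmsg_level	= SOL_RXRPC;
 *	cm->cmsg_type	= RXRPC_TX_LENGTH;
 *	cm->cmsg_len	= CMSG_LEN(sizeof(tx_len));
 *	memcpy(CMSG_DATA(cm), &tx_len, sizeof(tx_len));
 *
 * RXRPC_USER_CALL_ID is mandatory: without it the parser above fails the
 * whole sendmsg() with -EINVAL.
 */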

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
				     atomic_inc_return(&rxrpc_debug_id));
	/* The socket is now unlocked */

	rxrpc_put_peer(cp.peer);
	_leave(" = %p\n", call);
	return call;
}
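
/*
 * Illustrative sketch (an assumption, not part of this file): the msg_name
 * consumed above is a struct sockaddr_rxrpc naming the target service, e.g.
 * over IPv4:
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,			// hypothetical service
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(srx.transport.sin),
 *	};
 *
 *	srx.transport.sin.sin_family = AF_INET;
 *	srx.transport.sin.sin_port   = htons(7000);	// hypothetical port
 *	// plus the peer's address in srx.transport.sin.sin_addr
 */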

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
	__releases(&call->user_mutex)
{
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
	unsigned long now, j;
	int ret;

	struct rxrpc_send_params p = {
		.call.tx_total_len	= -1,
		.call.user_call_ID	= 0,
		.call.nr_timeouts	= 0,
		.call.interruptibility	= RXRPC_INTERRUPTIBLE,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
		.upgrade		= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
		/* The socket is now unlocked. */
		if (IS_ERR(call))
			return PTR_ERR(call);
		ret = 0;
		goto out_put_unlock;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
		ret = 0;
		if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
			goto out_put_unlock;
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
		case RXRPC_CALL_SERVER_ACCEPTING:
			rxrpc_put_call(call, rxrpc_call_put);
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.call.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto error_put;
			call->tx_total_len = p.call.tx_total_len;
		}
	}

	switch (p.call.nr_timeouts) {
	case 3:
		j = msecs_to_jiffies(p.call.timeouts.normal);
		if (p.call.timeouts.normal > 0 && j == 0)
			j = 1;
		WRITE_ONCE(call->next_rx_timo, j);
		/* Fall through */
	case 2:
		j = msecs_to_jiffies(p.call.timeouts.idle);
		if (p.call.timeouts.idle > 0 && j == 0)
			j = 1;
		WRITE_ONCE(call->next_req_timo, j);
		/* Fall through */
	case 1:
		if (p.call.timeouts.hard > 0) {
			j = msecs_to_jiffies(p.call.timeouts.hard);
			now = jiffies;
			j += now;
			WRITE_ONCE(call->expect_term_by, j);
			rxrpc_reduce_call_timer(call, j, now,
						rxrpc_timer_set_for_hard);
		}
		break;
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (rxrpc_is_client_call(call) &&
		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else if (rxrpc_is_service_call(call) &&
		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Reply phase not begun or not complete for service call. */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL);
	}

out_put_unlock:
	mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}
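
/*
 * Illustrative note (derived from the timeout handling above, hypothetical
 * values): an RXRPC_SET_CALL_TIMEOUT control message supplies one to three
 * 32-bit millisecond values in the order { hard, idle, normal }, e.g.:
 *
 *	u32 timeouts[3] = { 30000, 5000, 1000 };
 *
 * "normal" feeds next_rx_timo (the expected gap between received packets),
 * "idle" feeds next_req_timo (the wait for the next request data packet) and
 * "hard" sets an absolute expect_term_by limit on the call's lifetime.
 */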

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
	case RXRPC_CALL_SERVER_SEND_REPLY:
		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
				      notify_end_tx);
		break;
	case RXRPC_CALL_COMPLETE:
		read_lock_bh(&call->state_lock);
		ret = call->error;
		read_unlock_bh(&call->state_lock);
		break;
	default:
		/* Request phase complete for this client call */
		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
		ret = -EPROTO;
		break;
	}

	mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: 3-char string indicating why.
 *
 * Allow a kernel service to abort a call if it's still in an abortable state
 * and return true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, const char *why)
{
	bool aborted;

	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);

	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
	if (aborted)
		rxrpc_send_abort_packet(call);

	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
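
/*
 * A minimal usage sketch of the exported API above, assuming a kernel
 * service holding a call obtained elsewhere (e.g. via
 * rxrpc_kernel_begin_call()).  All names here are hypothetical:
 *
 *	static int my_send_request(struct socket *sock, struct rxrpc_call *call,
 *				   struct kvec *iov, size_t len)
 *	{
 *		struct msghdr msg = {};	// no MSG_MORE: this is the last data
 *		int ret;
 *
 *		// Pre-declare the total length so buffer-to-packet
 *		// encrypt-and-copy can be used.
 *		rxrpc_kernel_set_tx_length(sock, call, len);
 *
 *		iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
 *		ret = rxrpc_kernel_send_data(sock, call, &msg, len, NULL);
 *		if (ret < 0)
 *			rxrpc_kernel_abort_call(sock, call, RX_USER_ABORT,
 *						ret, "KSD");
 *		return ret;
 *	}
 */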