// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Return true if there's sufficient Tx queue space.
 */
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
	unsigned int win_size =
		min_t(unsigned int, call->tx_winsize,
		      call->cong_cwnd + call->cong_extra);
	rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);

	if (_tx_win)
		*_tx_win = tx_win;
	return call->tx_top - tx_win < win_size;
}

/*
 * Wait for space to appear in the Tx queue or a signal to occur.
 */
static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
					 struct rxrpc_call *call,
					 long *timeo)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		if (signal_pending(current))
			return sock_intr_errno(*timeo);

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * Wait for space to appear in the Tx queue uninterruptibly, but with
 * a timeout of 2*RTT if no progress was made and a signal occurred.
 */
static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
					    struct rxrpc_call *call)
{
	rxrpc_seq_t tx_start, tx_win;
	signed long rtt, timeout;

	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
	rtt = usecs_to_jiffies(rtt) * 2;
	if (rtt < 2)
		rtt = 2;

	timeout = rtt;
	tx_start = READ_ONCE(call->tx_hard_ack);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		tx_win = READ_ONCE(call->tx_hard_ack);
		if (rxrpc_check_tx_space(call, &tx_win))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		if (timeout == 0 &&
		    tx_win == tx_start && signal_pending(current))
			return -EINTR;

		if (tx_win != tx_start) {
			timeout = rtt;
			tx_start = tx_win;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		timeout = schedule_timeout(timeout);
	}
}
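
/* Illustrative arithmetic for the 2*RTT timeout above (an added example;
 * assumes srtt_us carries the smoothed RTT scaled by 8, TCP-style, which is
 * what the >>3 implies): with a 20ms smoothed RTT, srtt_us holds 160000, so
 * at HZ=1000:
 *
 *	rtt = usecs_to_jiffies(160000 >> 3) * 2 = 20 * 2 = 40 jiffies
 *
 * The floor of 2 jiffies stops an unmeasured or sub-tick RTT from turning
 * each schedule_timeout() into a zero-length wait.
 */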

/*
 * Wait for space to appear in the Tx queue uninterruptibly.
 */
static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    long *timeo)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (rxrpc_check_tx_space(call, NULL))
			return 0;

		if (call->state >= RXRPC_CALL_COMPLETE)
			return call->error;

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		*timeo = schedule_timeout(*timeo);
	}
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo,
				    bool waitall)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	switch (call->interruptibility) {
	case RXRPC_INTERRUPTIBLE:
		if (waitall)
			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
		else
			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
		break;
	case RXRPC_PREINTERRUPTIBLE:
	case RXRPC_UNINTERRUPTIBLE:
	default:
		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
		break;
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] =
			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
			RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}

/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}
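
/* Sketch of a notify_end_tx hook as a kernel service might pass to
 * rxrpc_kernel_send_data() below (a hypothetical example; the signature is
 * dictated by rxrpc_notify_end_tx_t and the invocation above).  The hook runs
 * once the last DATA packet has been queued, so a service would typically
 * advance its own call state machine here:
 *
 *	static void my_notify_end_tx(struct sock *sk, struct rxrpc_call *call,
 *				     unsigned long user_call_ID)
 *	{
 *		struct my_call *mc = (struct my_call *)user_call_ID;
 *
 *		set_bit(MY_CALL_TX_ENDED, &mc->flags);
 *		wake_up(&mc->waitq);
 *	}
 */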

/*
 * Queue a DATA packet for transmission, set the resend timeout and send
 * the packet immediately.  Returns the error from rxrpc_send_data_packet()
 * in case the caller wants to do something with it.
 */
static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			      struct sk_buff *skb, bool last,
			      rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long now;
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last)
		annotation |= RXRPC_TX_ANNO_LAST;

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			now = jiffies;
			WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
			if (!last)
				break;
			fallthrough;
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		switch (ret) {
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  0, ret);
			goto out;
		}
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		unsigned long now = jiffies;
		unsigned long resend_at = now + call->peer->rto_j;

		WRITE_ONCE(call->resend_at, resend_at);
		rxrpc_reduce_call_timer(call, resend_at, now,
					rxrpc_timer_set_for_send);
	}

out:
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	_leave(" = %d", ret);
	return ret;
}

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx,
			   bool *_dropped_lock)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	enum rxrpc_call_state state;
	long timeo;
	bool more = msg->msg_flags & MSG_MORE;
	int ret, copied = 0;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

reload:
	ret = -EPIPE;
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto maybe_error;
	state = READ_ONCE(call->state);
	ret = -ESHUTDOWN;
	if (state >= RXRPC_CALL_COMPLETE)
		goto maybe_error;
	ret = -EPROTO;
	if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
	    state != RXRPC_CALL_SERVER_ACK_REQUEST &&
	    state != RXRPC_CALL_SERVER_SEND_REPLY)
		goto maybe_error;

	ret = -EMSGSIZE;
	if (call->tx_total_len != -1) {
		if (len - copied > call->tx_total_len)
			goto maybe_error;
		if (!more && len - copied != call->tx_total_len)
			goto maybe_error;
	}

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_seen);
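
	/* Copy loop: each pass tops up the pending packet (allocating a new
	 * buffer, sized by the call's security class, if there isn't one) and
	 * hands it to rxrpc_queue_packet() once it is full or the message is
	 * exhausted without MSG_MORE set.
	 */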
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false, NULL);

		if (!skb) {
			size_t remain, bufsize, chunk, offset;

			_debug("alloc");

			if (!rxrpc_check_tx_space(call, NULL))
				goto wait_for_space;

			/* Work out the maximum size of a packet.  Assume that
			 * the security header is going to be in the padded
			 * region (enc blocksize), but the trailer is not.
			 */
			remain = more ? INT_MAX : msg_data_left(msg);
			ret = call->conn->security->how_much_data(call, remain,
								  &bufsize, &chunk, &offset);
			if (ret < 0)
				goto maybe_error;

			_debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, bufsize, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			sp = rxrpc_skb(skb);
			sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
			rxrpc_new_skb(skb, rxrpc_skb_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			__skb_put(skb, offset);

			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state == RXRPC_CALL_COMPLETE)
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = call->security->secure_packet(call, skb, skb->mark);
			if (ret < 0)
				goto out;

			ret = rxrpc_queue_packet(rx, call, skb,
						 !msg_data_left(msg) && !more,
						 notify_end_tx);
			/* Should check for failure here */
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
	if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
		read_lock_bh(&call->state_lock);
		if (call->error < 0)
			ret = call->error;
		read_unlock_bh(&call->state_lock);
	}
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	_leave(" = %d", call->error);
	return call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;

wait_for_space:
	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
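	/* Drop the call's user_mutex while we sleep so that the call can
	 * still be serviced in the meantime; *_dropped_lock tells the caller
	 * whether we managed to retake the mutex before returning.
	 */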
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
	goto reload;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->call.user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_CHARGE_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_CHARGE_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->call.tx_total_len < 0)
				return -EINVAL;
			break;

		case RXRPC_SET_CALL_TIMEOUT:
			if (len & 3 || len < 4 || len > 12)
				return -EINVAL;
			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
			p->call.nr_timeouts = len / 4;
			if (p->call.timeouts.hard > INT_MAX / HZ)
				return -ERANGE;
			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
				return -ERANGE;
			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
				return -ERANGE;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
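
/* Illustrative userspace counterpart to the cmsg parser above (an example,
 * not kernel code; error handling elided).  This builds a sendmsg() control
 * buffer that addresses user call 1 and requests an abort:
 *
 *	unsigned long call_id = 1;
 *	unsigned int abort_code = 0x1234;
 *	char ctl[CMSG_SPACE(sizeof(call_id)) + CMSG_SPACE(sizeof(abort_code))];
 *	struct msghdr msg = {
 *		.msg_control	= ctl,
 *		.msg_controllen	= sizeof(ctl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_ABORT;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(abort_code));
 *	memcpy(CMSG_DATA(cmsg), &abort_code, sizeof(abort_code));
 */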

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
				     atomic_inc_return(&rxrpc_debug_id));
	/* The socket is now unlocked */

	rxrpc_put_peer(cp.peer);
	_leave(" = %p\n", call);
	return call;
}
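
/* Locking note (a summary of the annotations above and below): the sendmsg()
 * path enters rxrpc_do_sendmsg() with the socket lock held and trades it for
 * the call's user_mutex - creating a new call releases the socket lock
 * implicitly - and rxrpc_send_data() may drop the user_mutex again while
 * waiting for Tx window space, reporting whether it did so via its
 * _dropped_lock argument.
 */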

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
	__releases(&call->user_mutex)
{
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
	unsigned long now, j;
	bool dropped_lock = false;
	int ret;

	struct rxrpc_send_params p = {
		.call.tx_total_len	= -1,
		.call.user_call_ID	= 0,
		.call.nr_timeouts	= 0,
		.call.interruptibility	= RXRPC_INTERRUPTIBLE,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
		.upgrade		= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
		goto error_release_sock;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
		ret = 0;
		if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
			goto out_put_unlock;
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
			rxrpc_put_call(call, rxrpc_call_put);
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.call.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto error_put;
			call->tx_total_len = p.call.tx_total_len;
		}
	}

	switch (p.call.nr_timeouts) {
	case 3:
		j = msecs_to_jiffies(p.call.timeouts.normal);
		if (p.call.timeouts.normal > 0 && j == 0)
			j = 1;
		WRITE_ONCE(call->next_rx_timo, j);
		fallthrough;
	case 2:
		j = msecs_to_jiffies(p.call.timeouts.idle);
		if (p.call.timeouts.idle > 0 && j == 0)
			j = 1;
		WRITE_ONCE(call->next_req_timo, j);
		fallthrough;
	case 1:
		if (p.call.timeouts.hard > 0) {
			j = msecs_to_jiffies(p.call.timeouts.hard);
			now = jiffies;
			j += now;
			WRITE_ONCE(call->expect_term_by, j);
			rxrpc_reduce_call_timer(call, j, now,
						rxrpc_timer_set_for_hard);
		}
		break;
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
	}

out_put_unlock:
	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}
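
/* Worked example for the RXRPC_SET_CALL_TIMEOUT handling in
 * rxrpc_do_sendmsg() above (illustrative): the normal and idle timeouts
 * arrive in milliseconds, so a "normal" timeout of 500ms on an HZ=250 kernel
 * becomes msecs_to_jiffies(500) == 125 jiffies.  A nonzero request that would
 * otherwise come out as 0 jiffies is defensively bumped to 1 so that it
 * cannot degenerate into "no timeout at all".
 */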

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	bool dropped_lock = false;
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
	case RXRPC_CALL_SERVER_SEND_REPLY:
		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
				      notify_end_tx, &dropped_lock);
		break;
	case RXRPC_CALL_COMPLETE:
		read_lock_bh(&call->state_lock);
		ret = call->error;
		read_unlock_bh(&call->state_lock);
		break;
	default:
		/* Request phase complete for this client call */
		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
		ret = -EPROTO;
		break;
	}

	if (!dropped_lock)
		mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: 3-char string indicating why.
 *
 * Allow a kernel service to abort a call if it's still in an abortable state,
 * returning true if the call was aborted or false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, const char *why)
{
	bool aborted;

	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);

	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
	if (aborted)
		rxrpc_send_abort_packet(call);

	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
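
/* Illustrative kernel-service usage of the APIs above (a sketch under assumed
 * names - "socket", "call", "buf", "len" and my_notify_end_tx() are
 * hypothetical; compare fs/afs/rxrpc.c for a real caller).  MSG_MORE is left
 * unset, so this single sendmsg ends the transmission phase:
 *
 *	struct msghdr msg = {};
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *	int ret;
 *
 *	rxrpc_kernel_set_tx_length(socket, call, len);
 *	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, len);
 *	ret = rxrpc_kernel_send_data(socket, call, &msg, len,
 *				     my_notify_end_tx);
 *	if (ret < 0)
 *		rxrpc_kernel_abort_call(socket, call, RX_USER_ABORT,
 *					ret, "KSD");
 */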