/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

struct rxrpc_send_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (call->tx_top - call->tx_hard_ack <
		    min_t(unsigned int, call->tx_winsize,
			  call->cong_cwnd + call->cong_extra))
			break;
		if (call->state >= RXRPC_CALL_COMPLETE) {
			ret = call->error;
			break;
		}
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		mutex_unlock(&call->user_mutex);
		*timeo = schedule_timeout(*timeo);
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(*timeo);
			break;
		}
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}
/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}

/*
 * Queue a DATA packet for transmission, set the resend timeout and send the
 * packet immediately
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct sk_buff *skb, bool last,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last) {
		annotation |= RXRPC_TX_ANNO_LAST;
		set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
	}

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_tx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			call->ack_at = call->expire_at;
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
					  ktime_get_real());
			if (!last)
				break;
			/* Fall through */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		ktime_t now = ktime_get_real(), resend_at;

		resend_at = ktime_add_ms(now, rxrpc_resend_timeout);

		if (ktime_before(resend_at, call->resend_at)) {
			call->resend_at = resend_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
		}
	}

	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave("");
}
/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	if (call->tx_total_len != -1) {
		if (len > call->tx_total_len)
			return -EMSGSIZE;
		if (!more && len != call->tx_total_len)
			return -EMSGSIZE;
	}

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_tx_seen);

	copied = 0;
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false);

		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			if (call->tx_top - call->tx_hard_ack >=
			    min_t(unsigned int, call->tx_winsize,
				  call->cong_cwnd + call->cong_extra)) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			max = RXRPC_JUMBO_DATALEN;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->security_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb, rxrpc_skb_tx_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->security_size);
			skb_reserve(skb, call->conn->security_size);
			skb->len += call->conn->security_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					skb_put_zero(skb, pad);
			}

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = conn->security->secure_packet(
				call, skb, skb->mark, skb->head);
			if (ret < 0)
				goto out;

			rxrpc_queue_packet(rx, call, skb,
					   !msg_data_left(msg) && !more,
					   notify_end_tx);
			skb = NULL;
		}

		/* Check for the far side aborting the call or a network error
		 * occurring.  If this happens, save any packet that was under
		 * construction so that in the case of a network error, the
		 * call can be retried or redirected.
		 */
		if (call->state == RXRPC_CALL_COMPLETE) {
			ret = call->error;
			goto out;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->tx_total_len < 0)
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
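/*
 * Usage sketch (illustrative only, not built as part of this file): roughly
 * how userspace might pack the control messages parsed above for a sendmsg()
 * call on an AF_RXRPC socket.  send_example_request() and the 0x12345 call ID
 * are made-up example values; SOL_RXRPC and the RXRPC_* cmsg types come from
 * the AF_RXRPC UAPI headers, whose availability depends on the kernel headers
 * installed.  The socket is assumed to have been bound and connect()ed to the
 * target service already, so no msg_name is supplied; error handling is
 * omitted.
 *
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <stdint.h>
 *	#include <linux/rxrpc.h>
 *
 *	static ssize_t send_example_request(int fd, const void *req, size_t n)
 *	{
 *		unsigned long call_id = 0x12345;	// RXRPC_USER_CALL_ID
 *		int64_t tx_len = n;			// RXRPC_TX_LENGTH
 *		union {
 *			char buf[CMSG_SPACE(sizeof(unsigned long)) +
 *				 CMSG_SPACE(sizeof(int64_t))];
 *			struct cmsghdr align;
 *		} control;
 *		struct iovec iov = {
 *			.iov_base	= (void *)req,
 *			.iov_len	= n,
 *		};
 *		struct msghdr msg = {
 *			.msg_iov	= &iov,
 *			.msg_iovlen	= 1,
 *			.msg_control	= control.buf,
 *			.msg_controllen	= sizeof(control.buf),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		memset(&control, 0, sizeof(control));
 *
 *		// Tag the call with a user call ID (mandatory for every
 *		// sendmsg() on an rxrpc socket).
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_RXRPC;
 *		cmsg->cmsg_type = RXRPC_USER_CALL_ID;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
 *		memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *		// Optionally declare the total Tx length up front.
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_RXRPC;
 *		cmsg->cmsg_type = RXRPC_TX_LENGTH;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(tx_len));
 *		memcpy(CMSG_DATA(cmsg), &tx_len, sizeof(tx_len));
 *
 *		// No MSG_MORE: this buffer is the whole request, so it also
 *		// ends the transmit phase of the call.
 *		return sendmsg(fd, &msg, 0);
 *	}
 */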
/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
				     p->tx_total_len, GFP_KERNEL);
	/* The socket is now unlocked */

	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
	int ret;

	struct rxrpc_send_params p = {
		.tx_total_len		= -1,
		.user_call_ID		= 0,
		.abort_code		= 0,
		.command		= RXRPC_CMD_SEND_DATA,
		.exclusive		= false,
		.upgrade		= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
		/* The socket is now unlocked. */
		if (IS_ERR(call))
			return PTR_ERR(call);
		rxrpc_put_call(call, rxrpc_call_put);
		return 0;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
		case RXRPC_CALL_SERVER_ACCEPTING:
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto error_put;
			call->tx_total_len = p.tx_total_len;
		}
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (rxrpc_is_client_call(call) &&
		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else if (rxrpc_is_service_call(call) &&
		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Reply phase not begun or not complete for service call. */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL);
	}

	mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
	case RXRPC_CALL_SERVER_SEND_REPLY:
		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
				      notify_end_tx);
		break;
	case RXRPC_CALL_COMPLETE:
		read_lock_bh(&call->state_lock);
		ret = call->error;
		read_unlock_bh(&call->state_lock);
		break;
	default:
		/* Request phase complete for this client call */
		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
		ret = -EPROTO;
		break;
	}

	mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: 3-char string indicating why.
 *
 * Allow a kernel service to abort a call, if it's still in an abortable state
 * and return true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, const char *why)
{
	bool aborted;

	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);

	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
	if (aborted)
		rxrpc_send_abort_packet(call);

	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
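/*
 * Usage sketch (illustrative only, not built as part of this file): roughly
 * how a kernel service might drive the exported helpers above on a call it
 * has already established.  example_send_request() and the "EXS" abort tag
 * are made up for illustration, and the iov_iter direction flags vary between
 * kernel versions; the rxrpc_kernel_*() calls themselves are the ones defined
 * in this file.
 *
 *	static void example_send_request(struct socket *rxrpc_sock,
 *					 struct rxrpc_call *call,
 *					 void *request, size_t request_len)
 *	{
 *		struct msghdr msg = {};
 *		struct kvec iov = {
 *			.iov_base	= request,
 *			.iov_len	= request_len,
 *		};
 *		int ret;
 *
 *		// Declare the full Tx length up front (assuming it wasn't
 *		// already given when the call was begun) so the data can be
 *		// packed and encrypted straight into final-sized packets.
 *		rxrpc_kernel_set_tx_length(rxrpc_sock, call, request_len);
 *
 *		// No address and no control data may be supplied; MSG_MORE is
 *		// left clear because this buffer is the entire request and
 *		// therefore ends the transmit phase.
 *		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
 *			      request_len);
 *
 *		ret = rxrpc_kernel_send_data(rxrpc_sock, call, &msg,
 *					     request_len, NULL);
 *		if (ret < 0)
 *			// Tear the call down locally and on the wire.
 *			rxrpc_kernel_abort_call(rxrpc_sock, call, RX_USER_ABORT,
 *						ret, "EXS");
 *	}
 */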