/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Post a call for attention by the socket or kernel service.  Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%d", call->debug_id);

	if (!list_empty(&call->recvmsg_link))
		return;

	rcu_read_lock();

	rx = rcu_dereference(call->socket);
	sk = &rx->sk;
	if (rx && sk->sk_state < RXRPC_CLOSE) {
		if (call->notify_rx) {
			spin_lock_bh(&call->notify_lock);
			call->notify_rx(sk, call, call->user_call_ID);
			spin_unlock_bh(&call->notify_lock);
		} else {
			write_lock_bh(&rx->recvmsg_lock);
			if (list_empty(&call->recvmsg_link)) {
				rxrpc_get_call(call, rxrpc_call_got);
				list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
			}
			write_unlock_bh(&rx->recvmsg_lock);

			if (!sock_flag(sk, SOCK_DEAD)) {
				_debug("call %ps", sk->sk_data_ready);
				sk->sk_data_ready(sk);
			}
		}
	}

	rcu_read_unlock();
	_leave("");
}

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
	u32 tmp = 0;
	int ret;

	switch (call->completion) {
	case RXRPC_CALL_SUCCEEDED:
		ret = 0;
		if (rxrpc_is_service_call(call))
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_LOCALLY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_NETWORK_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
		break;
	case RXRPC_CALL_LOCAL_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
		break;
	default:
		pr_err("Invalid terminal call state %u\n", call->state);
		BUG();
		break;
	}

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
			    call->rx_pkt_offset, call->rx_pkt_len, ret);
	return ret;
}

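/*
 * Illustrative sketch (not part of the original file, kept inside a comment):
 * how a userspace caller of recvmsg() might pick the terminal indication back
 * out of the control messages emitted above.  SOL_RXRPC and the RXRPC_* cmsg
 * types come from <linux/rxrpc.h>; the function name and the msghdr set-up it
 * assumes are hypothetical.
 *
 *	static void example_check_termination(struct msghdr *msg)
 *	{
 *		struct cmsghdr *cmsg;
 *		unsigned int code;
 *
 *		for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 *			if (cmsg->cmsg_level != SOL_RXRPC)
 *				continue;
 *			switch (cmsg->cmsg_type) {
 *			case RXRPC_ACK:			// service call fully acked, no payload
 *				break;
 *			case RXRPC_ABORT:		// 4-byte abort code follows
 *			case RXRPC_NET_ERROR:		// 4-byte errno follows
 *			case RXRPC_LOCAL_ERROR:		// 4-byte errno follows
 *				memcpy(&code, CMSG_DATA(cmsg), sizeof(code));
 *				break;
 *			}
 *		}
 *	}
 */
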
/*
 * Pass back notification of a new call.  The call is added to the
 * to-be-accepted list.  This means that the next call to be accepted might not
 * be the last call seen awaiting acceptance, but unless we leave this on the
 * front of the queue and block all other messages until someone gives us a
 * user_ID for it, there's not a lot we can do.
 */
static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
				  struct rxrpc_call *call,
				  struct msghdr *msg, int flags)
{
	int tmp = 0, ret;

	ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);

	if (ret == 0 && !(flags & MSG_PEEK)) {
		_debug("to be accepted");
		write_lock_bh(&rx->recvmsg_lock);
		list_del_init(&call->recvmsg_link);
		write_unlock_bh(&rx->recvmsg_lock);

		rxrpc_get_call(call, rxrpc_call_got);
		write_lock(&rx->call_lock);
		list_add_tail(&call->accept_link, &rx->to_be_accepted);
		write_unlock(&rx->call_lock);
	}

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
	return ret;
}

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
	_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);

	trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);

	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
				  rxrpc_propose_ack_terminal_ack);
		rxrpc_send_ack_packet(call, false);
	}

	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		__rxrpc_call_completed(call);
		write_unlock_bh(&call->state_lock);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->tx_phase = true;
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
		call->ack_at = call->expire_at;
		write_unlock_bh(&call->state_lock);
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
				  rxrpc_propose_ack_processing_op);
		break;
	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}

/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	u8 flags;
	int ix;

	_enter("%d", call->debug_id);

	hard_ack = call->rx_hard_ack;
	top = smp_load_acquire(&call->rx_top);
	ASSERT(before(hard_ack, top));

	hard_ack++;
	ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
	skb = call->rxtx_buffer[ix];
	rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
	sp = rxrpc_skb(skb);
	flags = sp->hdr.flags;
	serial = sp->hdr.serial;
	if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
		serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

	call->rxtx_buffer[ix] = NULL;
	call->rxtx_annotations[ix] = 0;
	/* Barrier against rxrpc_input_data(). */
	smp_store_release(&call->rx_hard_ack, hard_ack);

	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

	_debug("%u,%u,%02x", hard_ack, top, flags);
	trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
	if (flags & RXRPC_LAST_PACKET) {
		rxrpc_end_rx_phase(call, serial);
	} else {
		/* Check to see if there's an ACK that needs sending. */
		if (after_eq(hard_ack, call->ackr_consumed + 2) ||
		    after_eq(top, call->ackr_seen + 2) ||
		    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
					  true, false,
					  rxrpc_propose_ack_rotate_rx);
		if (call->ackr_reason)
			rxrpc_send_ack_packet(call, false);
	}
}

/*
 * Decrypt and verify a (sub)packet.  The packet's length may be changed due to
 * padding, but if this is the case, the packet length will be resident in the
 * socket buffer.  Note that we can't modify the master skb info as the skb may
 * be the home to multiple subpackets.
 */
static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       u8 annotation,
			       unsigned int offset, unsigned int len)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	u16 cksum = sp->hdr.cksum;

	_enter("");

	/* For all but the head jumbo subpacket, the security checksum is in a
	 * jumbo header immediately prior to the data.
	 */
	if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
		__be16 tmp;
		if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
			BUG();
		cksum = ntohs(tmp);
		seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
	}

	return call->conn->security->verify_packet(call, skb, offset, len,
						   seq, cksum);
}

/*
 * Locate the data within a packet.  This is complicated by:
 *
 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
 *     subpacket.
 *
 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
 *     contains an extra header which includes the true length of the data,
 *     excluding any encrypted padding.
 */
static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u8 *_annotation,
			     unsigned int *_offset, unsigned int *_len)
{
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = *_len;
	int ret;
	u8 annotation = *_annotation;

	/* Locate the subpacket */
	len = skb->len - offset;
	if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
		offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
			   RXRPC_JUMBO_SUBPKTLEN);
		len = (annotation & RXRPC_RX_ANNO_JLAST) ?
			skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
	}

	if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
		if (ret < 0)
			return ret;
		*_annotation |= RXRPC_RX_ANNO_VERIFIED;
	}

	*_offset = offset;
	*_len = len;
	call->conn->security->locate_data(call, skb, _offset, _len);
	return 0;
}

/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
			      struct msghdr *msg, struct iov_iter *iter,
			      size_t len, int flags, size_t *_offset)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_seq_t hard_ack, top, seq;
	size_t remain;
	bool last;
	unsigned int rx_pkt_offset, rx_pkt_len;
	int ix, copy, ret = -EAGAIN, ret2;

	rx_pkt_offset = call->rx_pkt_offset;
	rx_pkt_len = call->rx_pkt_len;

	if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
		seq = call->rx_hard_ack;
		ret = 1;
		goto done;
	}

	/* Barriers against rxrpc_input_data(). */
	hard_ack = call->rx_hard_ack;
	seq = hard_ack + 1;
	while (top = smp_load_acquire(&call->rx_top),
	       before_eq(seq, top)
	       ) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		if (!skb) {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
			break;
		}
		smp_rmb();
		rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
		sp = rxrpc_skb(skb);

		if (!(flags & MSG_PEEK))
			trace_rxrpc_receive(call, rxrpc_receive_front,
					    sp->hdr.serial, seq);

		if (msg)
			sock_recv_timestamp(msg, sock->sk, skb);

		if (rx_pkt_offset == 0) {
			ret2 = rxrpc_locate_data(call, skb,
						 &call->rxtx_annotations[ix],
						 &rx_pkt_offset, &rx_pkt_len);
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
					    rx_pkt_offset, rx_pkt_len, ret2);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}
		} else {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
		}

		/* We have to handle short, empty and used-up DATA packets. */
		remain = len - *_offset;
		copy = rx_pkt_len;
		if (copy > remain)
			copy = remain;
		if (copy > 0) {
			ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
						      copy);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}

			/* handle piecemeal consumption of data packets */
			rx_pkt_offset += copy;
			rx_pkt_len -= copy;
			*_offset += copy;
		}

		if (rx_pkt_len > 0) {
			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
					    rx_pkt_offset, rx_pkt_len, 0);
			ASSERTCMP(*_offset, ==, len);
			ret = 0;
			break;
		}

		/* The whole packet has been transferred. */
		last = sp->hdr.flags & RXRPC_LAST_PACKET;
		if (!(flags & MSG_PEEK))
			rxrpc_rotate_rx_window(call);
		rx_pkt_offset = 0;
		rx_pkt_len = 0;

		if (last) {
			ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
			ret = 1;
			goto out;
		}

		seq++;
	}

out:
	if (!(flags & MSG_PEEK)) {
		call->rx_pkt_offset = rx_pkt_offset;
		call->rx_pkt_len = rx_pkt_len;
	}
done:
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
			    rx_pkt_offset, rx_pkt_len, ret);
	return ret;
}

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -ENODATA;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
					    0, 0, 0, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		goto try_again;
	}

	/* Find the next call and dequeue it if we're not just peeking.  If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 */
	write_lock_bh(&rx->recvmsg_lock);
	l = rx->recvmsg_q.next;
	call = list_entry(l, struct rxrpc_call, recvmsg_link);
	if (!(flags & MSG_PEEK))
		list_del_init(&call->recvmsg_link);
	else
		rxrpc_get_call(call, rxrpc_call_got);
	write_unlock_bh(&rx->recvmsg_lock);

	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

	/* We're going to drop the socket lock, so we need to lock the call
	 * against interference by sendmsg.
	 */
	if (!mutex_trylock(&call->user_mutex)) {
		ret = -EWOULDBLOCK;
		if (flags & MSG_DONTWAIT)
			goto error_requeue_call;
		ret = -ERESTARTSYS;
		if (mutex_lock_interruptible(&call->user_mutex) < 0)
			goto error_requeue_call;
	}

	release_sock(&rx->sk);

	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		if (flags & MSG_CMSG_COMPAT) {
			unsigned int id32 = call->user_call_ID;

			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned int), &id32);
		} else {
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       sizeof(unsigned long),
				       &call->user_call_ID);
		}
		if (ret < 0)
			goto error_unlock_call;
	}

	if (msg->msg_name) {
		struct sockaddr_rxrpc *srx = msg->msg_name;
		size_t len = sizeof(call->peer->srx);

		memcpy(msg->msg_name, &call->peer->srx, len);
		srx->srx_service = call->service_id;
		msg->msg_namelen = len;
	}

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
		break;
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
					 flags, &copied);
		if (ret == -EAGAIN)
			ret = 0;

		if (after(call->rx_top, call->rx_hard_ack) &&
		    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
			rxrpc_notify_socket(call);
		break;
	default:
		ret = 0;
		break;
	}

	if (ret < 0)
		goto error_unlock_call;

	if (call->state == RXRPC_CALL_COMPLETE) {
		ret = rxrpc_recvmsg_term(call, msg);
		if (ret < 0)
			goto error_unlock_call;
		if (!(flags & MSG_PEEK))
			rxrpc_release_call(rx, call);
		msg->msg_flags |= MSG_EOR;
		ret = 1;
	}

	if (ret == 0)
		msg->msg_flags |= MSG_MORE;
	else
		msg->msg_flags &= ~MSG_MORE;
	ret = copied;

error_unlock_call:
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

error_requeue_call:
	if (!(flags & MSG_PEEK)) {
		write_lock_bh(&rx->recvmsg_lock);
		list_add(&call->recvmsg_link, &rx->recvmsg_q);
		write_unlock_bh(&rx->recvmsg_lock);
		trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
	} else {
		rxrpc_put_call(call, rxrpc_call_put);
	}
error_no_call:
	release_sock(&rx->sk);
	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
	return ret;

wait_interrupted:
	ret = sock_intr_errno(timeo);
wait_error:
	finish_wait(sk_sleep(&rx->sk), &wait);
	call = NULL;
	goto error_no_call;
}

/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to receive data from
 * @buf: The buffer to receive into
 * @size: The size of the buffer, including data already read
 * @_offset: The running offset into the buffer.
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 * @_service: Where to store the actual service ID (may be upgraded)
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call.  Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * This function adds the amount it transfers to *_offset, so this should be
 * precleared as appropriate.  Note that the amount remaining in the buffer is
 * taken to be size - *_offset.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
			   void *buf, size_t size, size_t *_offset,
			   bool want_more, u32 *_abort, u16 *_service)
{
	struct iov_iter iter;
	struct kvec iov;
	int ret;

	_enter("{%d,%s},%zu/%zu,%d",
	       call->debug_id, rxrpc_call_states[call->state],
	       *_offset, size, want_more);

	ASSERTCMP(*_offset, <=, size);
	ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);

	iov.iov_base = buf + *_offset;
	iov.iov_len = size - *_offset;
	iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);

	mutex_lock(&call->user_mutex);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
		ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0,
					 _offset);
		if (ret < 0)
			goto out;

		/* We can only reach here with a partially full buffer if we
		 * have reached the end of the data.  We must otherwise have a
		 * full buffer or have been given -EAGAIN.
		 */
		if (ret == 1) {
			if (*_offset < size)
				goto short_data;
			if (!want_more)
				goto read_phase_complete;
			ret = 0;
			goto out;
		}

		if (!want_more)
			goto excess_data;
		goto out;

	case RXRPC_CALL_COMPLETE:
		goto call_complete;

	default:
		ret = -EINPROGRESS;
		goto out;
	}

read_phase_complete:
	ret = 1;
out:
	if (_service)
		*_service = call->service_id;
	mutex_unlock(&call->user_mutex);
	_leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
	return ret;

short_data:
	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
	ret = -EBADMSG;
	goto out;
excess_data:
	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
	ret = -EMSGSIZE;
	goto out;
call_complete:
	*_abort = call->abort_code;
	ret = call->error;
	if (call->completion == RXRPC_CALL_SUCCEEDED) {
		ret = 1;
		if (size > 0)
			ret = -ECONNRESET;
	}
	goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
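
/*
 * Illustrative sketch (not part of the original file): one way a kernel
 * service might collect a call's reply with rxrpc_kernel_recv_data(), based
 * on the calling conventions documented above.  The function name, its
 * parameters and the single-buffer策略 -- sorry, strategy -- of asking for the
 * whole reply at once are hypothetical.
 */
static int __maybe_unused rxrpc_example_collect_reply(struct socket *reply_sock,
						      struct rxrpc_call *reply_call,
						      void *buf, size_t size)
{
	size_t offset = 0;	/* running offset; rxrpc_kernel_recv_data() adds to it */
	u32 abort_code = 0;	/* must be preinitialised to 0 */
	u16 service_id;
	int ret;

	/* Ask for the whole reply in one buffer (want_more = false). */
	ret = rxrpc_kernel_recv_data(reply_sock, reply_call, buf, size,
				     &offset, false, &abort_code, &service_id);
	switch (ret) {
	case 1:
		return 0;		/* whole reply received; offset is its length */
	case -EAGAIN:
		return -EAGAIN;		/* more packets needed; wait for notification and retry */
	case -ECONNABORTED:
		pr_warn("call aborted: %u\n", abort_code);
		/* fall through */
	default:
		return ret;		/* e.g. -EBADMSG/-EMSGSIZE for short/excess data */
	}
}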