/* Maintain an RxRPC server socket to do AFS communications through
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_cm.h"

struct socket *afs_socket; /* my RxRPC socket */
static struct workqueue_struct *afs_async_calls;
static struct afs_call *afs_spare_incoming_call;
static atomic_t afs_outstanding_calls;

static void afs_free_call(struct afs_call *);
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_wait_for_call_to_complete(struct afs_call *);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_dont_wait_for_call_to_complete(struct afs_call *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
static int afs_deliver_cm_op_id(struct afs_call *);

/* synchronous call management */
const struct afs_wait_mode afs_sync_call = {
	.notify_rx	= afs_wake_up_call_waiter,
	.wait		= afs_wait_for_call_to_complete,
};

/* asynchronous call management */
const struct afs_wait_mode afs_async_call = {
	.notify_rx	= afs_wake_up_async_call,
	.wait		= afs_dont_wait_for_call_to_complete,
};

/* asynchronous incoming call management */
static const struct afs_wait_mode afs_async_incoming_call = {
	.notify_rx	= afs_wake_up_async_call,
};

/* asynchronous incoming call initial processing */
static const struct afs_call_type afs_RXCMxxxx = {
	.name		= "CB.xxxx",
	.deliver	= afs_deliver_cm_op_id,
	.abort_to_error	= afs_abort_to_error,
};

static void afs_charge_preallocation(struct work_struct *);

static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);

static int afs_wait_atomic_t(atomic_t *p)
{
	schedule();
	return 0;
}

/*
 * open an RxRPC socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
int afs_open_socket(void)
{
	struct sockaddr_rxrpc srx;
	struct socket *socket;
	int ret;

	_enter("");

	ret = -ENOMEM;
	afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
	if (!afs_async_calls)
		goto error_0;

	ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
	if (ret < 0)
		goto error_1;

	socket->sk->sk_allocation = GFP_NOFS;

	/* bind the callback manager's address to make this a server socket */
	srx.srx_family			= AF_RXRPC;
	srx.srx_service			= CM_SERVICE;
	srx.transport_type		= SOCK_DGRAM;
	srx.transport_len		= sizeof(srx.transport.sin);
	srx.transport.sin.sin_family	= AF_INET;
	srx.transport.sin.sin_port	= htons(AFS_CM_PORT);
	memset(&srx.transport.sin.sin_addr, 0,
	       sizeof(srx.transport.sin.sin_addr));

	ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
	if (ret < 0)
		goto error_2;

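	/* Ask RxRPC to notify us of new incoming calls and to tell us to
	 * discard a preallocated call if the socket is shut down before the
	 * call gets used.
	 */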
	rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
					   afs_rx_discard_new_call);

	ret = kernel_listen(socket, INT_MAX);
	if (ret < 0)
		goto error_2;

	afs_socket = socket;
	afs_charge_preallocation(NULL);
	_leave(" = 0");
	return 0;

error_2:
	sock_release(socket);
error_1:
	destroy_workqueue(afs_async_calls);
error_0:
	_leave(" = %d", ret);
	return ret;
}

/*
 * close the RxRPC socket AFS was using
 */
void afs_close_socket(void)
{
	_enter("");

	if (afs_spare_incoming_call) {
		atomic_inc(&afs_outstanding_calls);
		afs_free_call(afs_spare_incoming_call);
		afs_spare_incoming_call = NULL;
	}

	_debug("outstanding %u", atomic_read(&afs_outstanding_calls));
	wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
			 TASK_UNINTERRUPTIBLE);
	_debug("no outstanding calls");

	flush_workqueue(afs_async_calls);
	kernel_sock_shutdown(afs_socket, SHUT_RDWR);
	flush_workqueue(afs_async_calls);
	sock_release(afs_socket);

	_debug("dework");
	destroy_workqueue(afs_async_calls);
	_leave("");
}

/*
 * free a call
 */
static void afs_free_call(struct afs_call *call)
{
	_debug("DONE %p{%s} [%d]",
	       call, call->type->name, atomic_read(&afs_outstanding_calls));

	ASSERTCMP(call->rxcall, ==, NULL);
	ASSERT(!work_pending(&call->async_work));
	ASSERT(call->type->name != NULL);

	kfree(call->request);
	kfree(call);

	if (atomic_dec_and_test(&afs_outstanding_calls))
		wake_up_atomic_t(&afs_outstanding_calls);
}

/*
 * End a call but do not free it
 */
static void afs_end_call_nofree(struct afs_call *call)
{
	if (call->rxcall) {
		rxrpc_kernel_end_call(afs_socket, call->rxcall);
		call->rxcall = NULL;
	}
	if (call->type->destructor)
		call->type->destructor(call);
}

/*
 * End a call and free it
 */
static void afs_end_call(struct afs_call *call)
{
	afs_end_call_nofree(call);
	afs_free_call(call);
}

/*
 * allocate a call with flat request and reply buffers
 */
struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
				     size_t request_size, size_t reply_max)
{
	struct afs_call *call;

	call = kzalloc(sizeof(*call), GFP_NOFS);
	if (!call)
		goto nomem_call;

	_debug("CALL %p{%s} [%d]",
	       call, type->name, atomic_read(&afs_outstanding_calls));
	atomic_inc(&afs_outstanding_calls);

	call->type = type;
	call->request_size = request_size;
	call->reply_max = reply_max;

	if (request_size) {
		call->request = kmalloc(request_size, GFP_NOFS);
		if (!call->request)
			goto nomem_free;
	}

	if (reply_max) {
		call->buffer = kmalloc(reply_max, GFP_NOFS);
		if (!call->buffer)
			goto nomem_free;
	}

	init_waitqueue_head(&call->waitq);
	return call;

nomem_free:
	afs_free_call(call);
nomem_call:
	return NULL;
}

/*
 * clean up a call with flat buffer
 */
void afs_flat_call_destructor(struct afs_call *call)
{
	_enter("");

	kfree(call->request);
	call->request = NULL;
	kfree(call->buffer);
	call->buffer = NULL;
}

/*
 * attach the data from a bunch of pages on an inode to a call
 */
static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
			  struct kvec *iov)
{
	struct page *pages[8];
	unsigned count, n, loop, offset, to;
	pgoff_t first = call->first, last = call->last;
	int ret;

	_enter("");

	offset = call->first_offset;
	call->first_offset = 0;

	do {
		_debug("attach %lx-%lx", first, last);

		count = last - first + 1;
		if (count > ARRAY_SIZE(pages))
			count = ARRAY_SIZE(pages);
		n = find_get_pages_contig(call->mapping, first, count, pages);
		ASSERTCMP(n, ==, count);

		loop = 0;
		do {
			msg->msg_flags = 0;
			to = PAGE_SIZE;
			if (first + loop >= last)
				to = call->last_to;
			else
				msg->msg_flags = MSG_MORE;
			iov->iov_base = kmap(pages[loop]) + offset;
			iov->iov_len = to - offset;
			offset = 0;

			_debug("- range %u-%u%s",
			       offset, to, msg->msg_flags ? " [more]" : "");
			iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
				      iov, 1, to - offset);

			/* have to change the state *before* sending the last
			 * packet as RxRPC might give us the reply before it
			 * returns from sending the request */
			if (first + loop >= last)
				call->state = AFS_CALL_AWAIT_REPLY;
			ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
						     msg, to - offset);
			kunmap(pages[loop]);
			if (ret < 0)
				break;
		} while (++loop < count);
		first += count;

		for (loop = 0; loop < count; loop++)
			put_page(pages[loop]);
		if (ret < 0)
			break;
	} while (first <= last);

	_leave(" = %d", ret);
	return ret;
}

/*
 * initiate a call
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
		  const struct afs_wait_mode *wait_mode)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_call *rxcall;
	struct msghdr msg;
	struct kvec iov[1];
	int ret;

	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));

	ASSERT(call->type != NULL);
	ASSERT(call->type->name != NULL);

	_debug("____MAKE %p{%s,%x} [%d]____",
	       call, call->type->name, key_serial(call->key),
	       atomic_read(&afs_outstanding_calls));

	call->wait_mode = wait_mode;
	INIT_WORK(&call->async_work, afs_process_async_call);

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = call->service_id;
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = call->port;
	memcpy(&srx.transport.sin.sin_addr, addr, 4);

	/* create a call */
	rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
					 (unsigned long) call, gfp,
					 wait_mode->notify_rx);
	call->key = NULL;
	if (IS_ERR(rxcall)) {
		ret = PTR_ERR(rxcall);
		goto error_kill_call;
	}

	call->rxcall = rxcall;

	/* send the request */
	iov[0].iov_base = call->request;
	iov[0].iov_len = call->request_size;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
		      call->request_size);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = (call->send_pages ? MSG_MORE : 0);

	/* have to change the state *before* sending the last packet as RxRPC
	 * might give us the reply before it returns from sending the
	 * request */
	if (!call->send_pages)
		call->state = AFS_CALL_AWAIT_REPLY;
	ret = rxrpc_kernel_send_data(afs_socket, rxcall,
				     &msg, call->request_size);
	if (ret < 0)
		goto error_do_abort;

	if (call->send_pages) {
		ret = afs_send_pages(call, &msg, iov);
		if (ret < 0)
			goto error_do_abort;
	}

	/* at this point, an async call may no longer exist as it may have
	 * already completed */
	return wait_mode->wait(call);

error_do_abort:
	rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
error_kill_call:
	afs_end_call(call);
	_leave(" = %d", ret);
	return ret;
}

/*
 * deliver messages to a call
 */
static void afs_deliver_to_call(struct afs_call *call)
{
	u32 abort_code;
	int ret;

	_enter("%s", call->type->name);

	while (call->state == AFS_CALL_AWAIT_REPLY ||
	       call->state == AFS_CALL_AWAIT_OP_ID ||
	       call->state == AFS_CALL_AWAIT_REQUEST ||
	       call->state == AFS_CALL_AWAIT_ACK
	       ) {
		if (call->state == AFS_CALL_AWAIT_ACK) {
			size_t offset = 0;
			ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
						     NULL, 0, &offset, false,
						     &call->abort_code);
			if (ret == -EINPROGRESS || ret == -EAGAIN)
				return;
			if (ret == 1 || ret < 0) {
				call->state = AFS_CALL_COMPLETE;
				goto done;
			}
			return;
		}

		ret = call->type->deliver(call);
		switch (ret) {
		case 0:
			if (call->state == AFS_CALL_AWAIT_REPLY)
				call->state = AFS_CALL_COMPLETE;
			goto done;
		case -EINPROGRESS:
		case -EAGAIN:
			goto out;
		case -ENOTCONN:
			abort_code = RX_CALL_DEAD;
			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
						abort_code, -ret, "KNC");
			goto do_abort;
		case -ENOTSUPP:
			abort_code = RX_INVALID_OPERATION;
			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
						abort_code, -ret, "KIV");
			goto do_abort;
		case -ENODATA:
		case -EBADMSG:
		case -EMSGSIZE:
		default:
			abort_code = RXGEN_CC_UNMARSHAL;
			if (call->state != AFS_CALL_AWAIT_REPLY)
				abort_code = RXGEN_SS_UNMARSHAL;
			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
						abort_code, EBADMSG, "KUM");
			goto do_abort;
		}
	}

done:
	if (call->state == AFS_CALL_COMPLETE && call->incoming)
		afs_end_call(call);
out:
	_leave("");
	return;

do_abort:
	call->error = ret;
	call->state = AFS_CALL_COMPLETE;
	goto done;
}

/*
 * wait synchronously for a call to complete
 */
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
	const char *abort_why;
	int ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	add_wait_queue(&call->waitq, &myself);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* deliver any messages that are in the queue */
		if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
			call->need_attention = false;
			__set_current_state(TASK_RUNNING);
			afs_deliver_to_call(call);
			continue;
		}

		abort_why = "KWC";
		ret = call->error;
		if (call->state == AFS_CALL_COMPLETE)
			break;
		abort_why = "KWI";
		ret = -EINTR;
		if (signal_pending(current))
			break;
		schedule();
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

	/* kill the call */
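	/* - if we get here with the call still incomplete, the wait loop was
	 *   broken by a signal and ret is -EINTR, so abort the call rather
	 *   than leave the peer waiting for us
	 */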
	if (call->state < AFS_CALL_COMPLETE) {
		_debug("call incomplete");
		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
					RX_CALL_DEAD, -ret, abort_why);
	}

	_debug("call complete");
	afs_end_call(call);
	_leave(" = %d", ret);
	return ret;
}

/*
 * wake up a waiting call
 */
static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
				    unsigned long call_user_ID)
{
	struct afs_call *call = (struct afs_call *)call_user_ID;

	call->need_attention = true;
	wake_up(&call->waitq);
}

/*
 * wake up an asynchronous call
 */
static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
				   unsigned long call_user_ID)
{
	struct afs_call *call = (struct afs_call *)call_user_ID;

	call->need_attention = true;
	queue_work(afs_async_calls, &call->async_work);
}

/*
 * put a call into asynchronous mode
 * - mustn't touch the call descriptor as the call may have completed by the
 *   time we get here
 */
static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
{
	_enter("");
	return -EINPROGRESS;
}

/*
 * delete an asynchronous call
 */
static void afs_delete_async_call(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, async_work);

	_enter("");

	afs_free_call(call);

	_leave("");
}

/*
 * perform processing on an asynchronous call
 */
static void afs_process_async_call(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, async_work);

	_enter("");

	if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
		call->need_attention = false;
		afs_deliver_to_call(call);
	}

	if (call->state == AFS_CALL_COMPLETE && call->wait_mode) {
		if (call->wait_mode->async_complete)
			call->wait_mode->async_complete(call->reply,
							call->error);
		call->reply = NULL;

		/* kill the call */
		afs_end_call_nofree(call);

		/* we can't just delete the call because the work item may be
		 * queued */
		call->async_work.func = afs_delete_async_call;
		queue_work(afs_async_calls, &call->async_work);
	}

	_leave("");
}

/*
 * Attach an afs_call to an incoming rxrpc call that has used one of our
 * preallocations.
 */
static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
{
	struct afs_call *call = (struct afs_call *)user_call_ID;

	call->rxcall = rxcall;
}

/*
 * Charge the incoming call preallocation.
 */
static void afs_charge_preallocation(struct work_struct *work)
{
	struct afs_call *call = afs_spare_incoming_call;

	for (;;) {
		if (!call) {
			call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
			if (!call)
				break;

			INIT_WORK(&call->async_work, afs_process_async_call);
			call->wait_mode = &afs_async_incoming_call;
			call->type = &afs_RXCMxxxx;
			init_waitqueue_head(&call->waitq);
			call->state = AFS_CALL_AWAIT_OP_ID;
		}

		if (rxrpc_kernel_charge_accept(afs_socket,
					       afs_wake_up_async_call,
					       afs_rx_attach,
					       (unsigned long)call,
					       GFP_KERNEL) < 0)
			break;
		call = NULL;
	}
	afs_spare_incoming_call = call;
}

/*
 * Discard a preallocated call when a socket is shut down.
 */
static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
				    unsigned long user_call_ID)
{
	struct afs_call *call = (struct afs_call *)user_call_ID;

	atomic_inc(&afs_outstanding_calls);
	call->rxcall = NULL;
	afs_free_call(call);
}

/*
 * Notification of an incoming call.
 */
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
			    unsigned long user_call_ID)
{
	atomic_inc(&afs_outstanding_calls);
	queue_work(afs_wq, &afs_charge_preallocation_work);
}

/*
 * Grab the operation ID from an incoming cache manager call.  The socket
 * buffer is discarded on error or if we don't yet have sufficient data.
 */
static int afs_deliver_cm_op_id(struct afs_call *call)
{
	int ret;

	_enter("{%zu}", call->offset);

	ASSERTCMP(call->offset, <, 4);

	/* the operation ID forms the first four bytes of the request data */
	ret = afs_extract_data(call, &call->operation_ID, 4, true);
	if (ret < 0)
		return ret;

	call->state = AFS_CALL_AWAIT_REQUEST;
	call->offset = 0;

	/* ask the cache manager to route the call (it'll change the call type
	 * if successful) */
	if (!afs_cm_incoming_call(call))
		return -ENOTSUPP;

	/* pass responsibility for the remainder of this message off to the
	 * cache manager op */
	return call->type->deliver(call);
}

/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
	struct msghdr msg;

	_enter("");

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	call->state = AFS_CALL_AWAIT_ACK;
	switch (rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, 0)) {
	case 0:
		_leave(" [replied]");
		return;

	case -ENOMEM:
		_debug("oom");
		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
					RX_USER_ABORT, ENOMEM, "KOO");
	default:
		afs_end_call(call);
		_leave(" [error]");
		return;
	}
}

/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
	struct msghdr msg;
	struct kvec iov[1];
	int n;

	_enter("");

	iov[0].iov_base = (void *) buf;
	iov[0].iov_len = len;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	call->state = AFS_CALL_AWAIT_ACK;
	n = rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, len);
	if (n >= 0) {
		/* Success */
		_leave(" [replied]");
		return;
	}

	if (n == -ENOMEM) {
		_debug("oom");
		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
					RX_USER_ABORT, ENOMEM, "KOO");
	}
	afs_end_call(call);
	_leave(" [error]");
}

/*
 * Extract a piece of data from the received data socket buffers.
 */
int afs_extract_data(struct afs_call *call, void *buf, size_t count,
		     bool want_more)
{
	int ret;

	_enter("{%s,%zu},,%zu,%d",
	       call->type->name, call->offset, count, want_more);

	ASSERTCMP(call->offset, <=, count);

	ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
				     buf, count, &call->offset,
				     want_more, &call->abort_code);
	if (ret == 0 || ret == -EAGAIN)
		return ret;

	if (ret == 1) {
		switch (call->state) {
		case AFS_CALL_AWAIT_REPLY:
			call->state = AFS_CALL_COMPLETE;
			break;
		case AFS_CALL_AWAIT_REQUEST:
			call->state = AFS_CALL_REPLYING;
			break;
		default:
			break;
		}
		return 0;
	}

	if (ret == -ECONNABORTED)
		call->error = call->type->abort_to_error(call->abort_code);
	else
		call->error = ret;
	call->state = AFS_CALL_COMPLETE;
	return ret;
}