/* Maintain an RxRPC server socket to do AFS communications through
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_cm.h"

struct socket *afs_socket; /* my RxRPC socket */
static struct workqueue_struct *afs_async_calls;
static struct afs_call *afs_spare_incoming_call;
static atomic_t afs_outstanding_calls;

static void afs_free_call(struct afs_call *);
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_wait_for_call_to_complete(struct afs_call *);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_dont_wait_for_call_to_complete(struct afs_call *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
static int afs_deliver_cm_op_id(struct afs_call *);

/* synchronous call management */
const struct afs_wait_mode afs_sync_call = {
        .notify_rx = afs_wake_up_call_waiter,
        .wait = afs_wait_for_call_to_complete,
};

/* asynchronous call management */
const struct afs_wait_mode afs_async_call = {
        .notify_rx = afs_wake_up_async_call,
        .wait = afs_dont_wait_for_call_to_complete,
};

/* asynchronous incoming call management */
static const struct afs_wait_mode afs_async_incoming_call = {
        .notify_rx = afs_wake_up_async_call,
};

/* asynchronous incoming call initial processing */
static const struct afs_call_type afs_RXCMxxxx = {
        .name = "CB.xxxx",
        .deliver = afs_deliver_cm_op_id,
        .abort_to_error = afs_abort_to_error,
};

static void afs_charge_preallocation(struct work_struct *);

static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);

/*
 * action function for wait_on_atomic_t(): just reschedule; the waiter is
 * woken when the outstanding call count reaches zero
 */
static int afs_wait_atomic_t(atomic_t *p)
{
        schedule();
        return 0;
}

/*
 * open an RxRPC socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
int afs_open_socket(void)
{
        struct sockaddr_rxrpc srx;
        struct socket *socket;
        int ret;

        _enter("");

        ret = -ENOMEM;
        afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
        if (!afs_async_calls)
                goto error_0;

        ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
        if (ret < 0)
                goto error_1;

        socket->sk->sk_allocation = GFP_NOFS;

        /* bind the callback manager's address to make this a server socket */
        srx.srx_family = AF_RXRPC;
        srx.srx_service = CM_SERVICE;
        srx.transport_type = SOCK_DGRAM;
        srx.transport_len = sizeof(srx.transport.sin);
        srx.transport.sin.sin_family = AF_INET;
        srx.transport.sin.sin_port = htons(AFS_CM_PORT);
        memset(&srx.transport.sin.sin_addr, 0,
               sizeof(srx.transport.sin.sin_addr));

        ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
        if (ret < 0)
                goto error_2;

        rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
                                           afs_rx_discard_new_call);

        ret = kernel_listen(socket, INT_MAX);
        if (ret < 0)
                goto error_2;

        afs_socket = socket;
        afs_charge_preallocation(NULL);
        _leave(" = 0");
        return 0;

error_2:
        sock_release(socket);
error_1:
        destroy_workqueue(afs_async_calls);
error_0:
        _leave(" = %d", ret);
        return ret;
}

/*
 * close the RxRPC socket AFS was using
 */
void afs_close_socket(void)
{
        _enter("");

        if (afs_spare_incoming_call) {
                atomic_inc(&afs_outstanding_calls);
                afs_free_call(afs_spare_incoming_call);
                afs_spare_incoming_call = NULL;
        }

        _debug("outstanding %u", atomic_read(&afs_outstanding_calls));
        wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
                         TASK_UNINTERRUPTIBLE);
        _debug("no outstanding calls");

        flush_workqueue(afs_async_calls);
        kernel_sock_shutdown(afs_socket, SHUT_RDWR);
        flush_workqueue(afs_async_calls);
        sock_release(afs_socket);

        _debug("dework");
        destroy_workqueue(afs_async_calls);
        _leave("");
}

/*
 * free a call
 */
static void afs_free_call(struct afs_call *call)
{
        _debug("DONE %p{%s} [%d]",
               call, call->type->name, atomic_read(&afs_outstanding_calls));

        ASSERTCMP(call->rxcall, ==, NULL);
        ASSERT(!work_pending(&call->async_work));
        ASSERT(call->type->name != NULL);

        kfree(call->request);
        kfree(call);

        if (atomic_dec_and_test(&afs_outstanding_calls))
                wake_up_atomic_t(&afs_outstanding_calls);
}

/*
 * End a call but do not free it
 */
static void afs_end_call_nofree(struct afs_call *call)
{
        if (call->rxcall) {
                rxrpc_kernel_end_call(afs_socket, call->rxcall);
                call->rxcall = NULL;
        }
        if (call->type->destructor)
                call->type->destructor(call);
}

/*
 * End a call and free it
 */
static void afs_end_call(struct afs_call *call)
{
        afs_end_call_nofree(call);
        afs_free_call(call);
}

/*
 * allocate a call with flat request and reply buffers
 */
struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
                                     size_t request_size, size_t reply_max)
{
        struct afs_call *call;

        call = kzalloc(sizeof(*call), GFP_NOFS);
        if (!call)
                goto nomem_call;

        _debug("CALL %p{%s} [%d]",
               call, type->name, atomic_read(&afs_outstanding_calls));
        atomic_inc(&afs_outstanding_calls);

        call->type = type;
        call->request_size = request_size;
        call->reply_max = reply_max;

        if (request_size) {
                call->request = kmalloc(request_size, GFP_NOFS);
                if (!call->request)
                        goto nomem_free;
        }

        if (reply_max) {
                call->buffer = kmalloc(reply_max, GFP_NOFS);
                if (!call->buffer)
                        goto nomem_free;
        }

        init_waitqueue_head(&call->waitq);
        return call;

nomem_free:
        afs_free_call(call);
nomem_call:
        return NULL;
}

/*
 * clean up a call with flat buffer
 */
void afs_flat_call_destructor(struct afs_call *call)
{
        _enter("");

        kfree(call->request);
        call->request = NULL;
        kfree(call->buffer);
        call->buffer = NULL;
}

/*
 * attach the data from a bunch of pages on an inode to a call
 */
static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
{
        struct page *pages[8];
        unsigned count, n, loop, offset, to;
        pgoff_t first = call->first, last = call->last;
        int ret;

        _enter("");

        offset = call->first_offset;
        call->first_offset = 0;

        do {
                _debug("attach %lx-%lx", first, last);

                count = last - first + 1;
                if (count > ARRAY_SIZE(pages))
                        count = ARRAY_SIZE(pages);
                n = find_get_pages_contig(call->mapping, first, count, pages);
                ASSERTCMP(n, ==, count);

                loop = 0;
                do {
                        struct bio_vec bvec = {.bv_page = pages[loop],
                                               .bv_offset = offset};
                        msg->msg_flags = 0;
                        to = PAGE_SIZE;
                        if (first + loop >= last)
                                to = call->last_to;
                        else
                                msg->msg_flags = MSG_MORE;
                        bvec.bv_len = to - offset;
                        offset = 0;

                        _debug("- range %u-%u%s",
                               offset, to, msg->msg_flags ? " [more]" : "");
                        iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
                                      &bvec, 1, to - offset);

                        /* have to change the state *before* sending the last
                         * packet as RxRPC might give us the reply before it
                         * returns from sending the request */
                        if (first + loop >= last)
                                call->state = AFS_CALL_AWAIT_REPLY;
                        ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
                                                     msg, to - offset);
                        if (ret < 0)
                                break;
                } while (++loop < count);
                first += count;

                for (loop = 0; loop < count; loop++)
                        put_page(pages[loop]);
                if (ret < 0)
                        break;
        } while (first <= last);

        _leave(" = %d", ret);
        return ret;
}

/*
 * initiate a call
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
                  const struct afs_wait_mode *wait_mode)
{
        struct sockaddr_rxrpc srx;
        struct rxrpc_call *rxcall;
        struct msghdr msg;
        struct kvec iov[1];
        int ret;

        _enter("%x,{%d},", addr->s_addr, ntohs(call->port));

        ASSERT(call->type != NULL);
        ASSERT(call->type->name != NULL);

        _debug("____MAKE %p{%s,%x} [%d]____",
               call, call->type->name, key_serial(call->key),
               atomic_read(&afs_outstanding_calls));

        call->wait_mode = wait_mode;
        INIT_WORK(&call->async_work, afs_process_async_call);

        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
        srx.srx_service = call->service_id;
        srx.transport_type = SOCK_DGRAM;
        srx.transport_len = sizeof(srx.transport.sin);
        srx.transport.sin.sin_family = AF_INET;
        srx.transport.sin.sin_port = call->port;
        memcpy(&srx.transport.sin.sin_addr, addr, 4);

        /* create a call */
        rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
                                         (unsigned long) call, gfp,
                                         wait_mode->notify_rx);
        call->key = NULL;
        if (IS_ERR(rxcall)) {
                ret = PTR_ERR(rxcall);
                goto error_kill_call;
        }

        call->rxcall = rxcall;

        /* send the request */
        iov[0].iov_base = call->request;
        iov[0].iov_len = call->request_size;

        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
                      call->request_size);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = (call->send_pages ? MSG_MORE : 0);

        /* have to change the state *before* sending the last packet as RxRPC
         * might give us the reply before it returns from sending the
         * request */
        if (!call->send_pages)
                call->state = AFS_CALL_AWAIT_REPLY;
        ret = rxrpc_kernel_send_data(afs_socket, rxcall,
                                     &msg, call->request_size);
        if (ret < 0)
                goto error_do_abort;

        if (call->send_pages) {
                ret = afs_send_pages(call, &msg);
                if (ret < 0)
                        goto error_do_abort;
        }

        /* at this point, an async call may no longer exist as it may have
         * already completed */
        return wait_mode->wait(call);

error_do_abort:
        rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
error_kill_call:
        afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
}

/*
 * deliver messages to a call
 */
static void afs_deliver_to_call(struct afs_call *call)
{
        u32 abort_code;
        int ret;

        _enter("%s", call->type->name);

        while (call->state == AFS_CALL_AWAIT_REPLY ||
               call->state == AFS_CALL_AWAIT_OP_ID ||
               call->state == AFS_CALL_AWAIT_REQUEST ||
               call->state == AFS_CALL_AWAIT_ACK
               ) {
                if (call->state == AFS_CALL_AWAIT_ACK) {
                        size_t offset = 0;
                        ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
                                                     NULL, 0, &offset, false,
                                                     &call->abort_code);
                        if (ret == -EINPROGRESS || ret == -EAGAIN)
                                return;
                        if (ret == 1 || ret < 0) {
                                call->state = AFS_CALL_COMPLETE;
                                goto done;
                        }
                        return;
                }

                ret = call->type->deliver(call);
                switch (ret) {
                case 0:
                        if (call->state == AFS_CALL_AWAIT_REPLY)
                                call->state = AFS_CALL_COMPLETE;
                        goto done;
                case -EINPROGRESS:
                case -EAGAIN:
                        goto out;
                case -ENOTCONN:
                        abort_code = RX_CALL_DEAD;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                                abort_code, -ret, "KNC");
                        goto do_abort;
                case -ENOTSUPP:
                        abort_code = RX_INVALID_OPERATION;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                                abort_code, -ret, "KIV");
                        goto do_abort;
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
                default:
                        abort_code = RXGEN_CC_UNMARSHAL;
                        if (call->state != AFS_CALL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                                abort_code, EBADMSG, "KUM");
                        goto do_abort;
                }
        }

done:
        if (call->state == AFS_CALL_COMPLETE && call->incoming)
                afs_end_call(call);
out:
        _leave("");
        return;

do_abort:
        call->error = ret;
        call->state = AFS_CALL_COMPLETE;
        goto done;
}

/*
 * wait synchronously for a call to complete
 */
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
        const char *abort_why;
        int ret;

        DECLARE_WAITQUEUE(myself, current);

        _enter("");

        add_wait_queue(&call->waitq, &myself);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                /* deliver any messages that are in the queue */
                if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
                        call->need_attention = false;
                        __set_current_state(TASK_RUNNING);
                        afs_deliver_to_call(call);
                        continue;
                }

                abort_why = "KWC";
                ret = call->error;
                if (call->state == AFS_CALL_COMPLETE)
                        break;
                abort_why = "KWI";
                ret = -EINTR;
                if (signal_pending(current))
                        break;
                schedule();
        }

        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);

        /* kill the call */
        if (call->state < AFS_CALL_COMPLETE) {
                _debug("call incomplete");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                        RX_CALL_DEAD, -ret, abort_why);
        }

        _debug("call complete");
        afs_end_call(call);
        _leave(" = %d", ret);
        return ret;
}

/*
 * wake up a waiting call
 */
static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
                                    unsigned long call_user_ID)
{
        struct afs_call *call = (struct afs_call *)call_user_ID;

        call->need_attention = true;
        wake_up(&call->waitq);
}

/*
 * wake up an asynchronous call
 */
static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
                                   unsigned long call_user_ID)
{
        struct afs_call *call = (struct afs_call *)call_user_ID;

        call->need_attention = true;
        queue_work(afs_async_calls, &call->async_work);
}

/*
 * put a call into asynchronous mode
 * - mustn't touch the call descriptor as the call may have completed by the
 *   time we get here
 */
static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
{
        _enter("");
        return -EINPROGRESS;
}

/*
 * delete an asynchronous call
 */
static void afs_delete_async_call(struct work_struct *work)
{
        struct afs_call *call = container_of(work, struct afs_call, async_work);

        _enter("");

        afs_free_call(call);

        _leave("");
}

/*
 * perform processing on an asynchronous call
 */
static void afs_process_async_call(struct work_struct *work)
{
        struct afs_call *call = container_of(work, struct afs_call, async_work);

        _enter("");

        if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
                call->need_attention = false;
                afs_deliver_to_call(call);
        }

        if (call->state == AFS_CALL_COMPLETE && call->wait_mode) {
                if (call->wait_mode->async_complete)
                        call->wait_mode->async_complete(call->reply,
                                                        call->error);
                call->reply = NULL;

                /* kill the call */
                afs_end_call_nofree(call);

                /* we can't just delete the call because the work item may be
                 * queued */
                call->async_work.func = afs_delete_async_call;
                queue_work(afs_async_calls, &call->async_work);
        }

        _leave("");
}

/*
 * attach an accepted incoming call to the preallocated afs_call that was
 * charged for it
 */
static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
{
        struct afs_call *call = (struct afs_call *)user_call_ID;

        call->rxcall = rxcall;
}

/*
 * Charge the incoming call preallocation.
 */
static void afs_charge_preallocation(struct work_struct *work)
{
        struct afs_call *call = afs_spare_incoming_call;

        for (;;) {
                if (!call) {
                        call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
                        if (!call)
                                break;

                        INIT_WORK(&call->async_work, afs_process_async_call);
                        call->wait_mode = &afs_async_incoming_call;
                        call->type = &afs_RXCMxxxx;
                        init_waitqueue_head(&call->waitq);
                        call->state = AFS_CALL_AWAIT_OP_ID;
                }

                if (rxrpc_kernel_charge_accept(afs_socket,
                                               afs_wake_up_async_call,
                                               afs_rx_attach,
                                               (unsigned long)call,
                                               GFP_KERNEL) < 0)
                        break;
                call = NULL;
        }
        afs_spare_incoming_call = call;
}

/*
 * Discard a preallocated call when a socket is shut down.
 */
static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
                                    unsigned long user_call_ID)
{
        struct afs_call *call = (struct afs_call *)user_call_ID;

        atomic_inc(&afs_outstanding_calls);
        call->rxcall = NULL;
        afs_free_call(call);
}

/*
 * Notification of an incoming call.
 */
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
                            unsigned long user_call_ID)
{
        atomic_inc(&afs_outstanding_calls);
        queue_work(afs_wq, &afs_charge_preallocation_work);
}

/*
 * Grab the operation ID from an incoming cache manager call. The socket
 * buffer is discarded on error or if we don't yet have sufficient data.
 */
static int afs_deliver_cm_op_id(struct afs_call *call)
{
        int ret;

        _enter("{%zu}", call->offset);

        ASSERTCMP(call->offset, <, 4);

        /* the operation ID forms the first four bytes of the request data */
        ret = afs_extract_data(call, &call->tmp, 4, true);
        if (ret < 0)
                return ret;

        call->operation_ID = ntohl(call->tmp);
        call->state = AFS_CALL_AWAIT_REQUEST;
        call->offset = 0;

        /* ask the cache manager to route the call (it'll change the call type
         * if successful) */
        if (!afs_cm_incoming_call(call))
                return -ENOTSUPP;

        /* pass responsibility for the remainder of this message off to the
         * cache manager op */
        return call->type->deliver(call);
}

/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
        struct msghdr msg;

        _enter("");

        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        call->state = AFS_CALL_AWAIT_ACK;
        switch (rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, 0)) {
        case 0:
                _leave(" [replied]");
                return;

        case -ENOMEM:
                _debug("oom");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                        RX_USER_ABORT, ENOMEM, "KOO");
                /* fall through */
        default:
                afs_end_call(call);
                _leave(" [error]");
                return;
        }
}

/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
        struct msghdr msg;
        struct kvec iov[1];
        int n;

        _enter("");

        iov[0].iov_base = (void *) buf;
        iov[0].iov_len = len;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        call->state = AFS_CALL_AWAIT_ACK;
        n = rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, len);
        if (n >= 0) {
                /* Success */
                _leave(" [replied]");
                return;
        }

        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                        RX_USER_ABORT, ENOMEM, "KOO");
        }
        afs_end_call(call);
        _leave(" [error]");
}

/*
 * Extract a piece of data from the received data socket buffers.
 */
int afs_extract_data(struct afs_call *call, void *buf, size_t count,
                     bool want_more)
{
        int ret;

        _enter("{%s,%zu},,%zu,%d",
               call->type->name, call->offset, count, want_more);

        ASSERTCMP(call->offset, <=, count);

        ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
                                     buf, count, &call->offset,
                                     want_more, &call->abort_code);
        if (ret == 0 || ret == -EAGAIN)
                return ret;

        if (ret == 1) {
                switch (call->state) {
                case AFS_CALL_AWAIT_REPLY:
                        call->state = AFS_CALL_COMPLETE;
                        break;
                case AFS_CALL_AWAIT_REQUEST:
                        call->state = AFS_CALL_REPLYING;
                        break;
                default:
                        break;
                }
                return 0;
        }

        if (ret == -ECONNABORTED)
                call->error = call->type->abort_to_error(call->abort_code);
        else
                call->error = ret;
        call->state = AFS_CALL_COMPLETE;
        return ret;
}