xref: /openbmc/linux/fs/afs/rxrpc.c (revision 174cd4b1)
1 /* Maintain an RxRPC server socket to do AFS communications through
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/sched/signal.h>
14 
15 #include <net/sock.h>
16 #include <net/af_rxrpc.h>
17 #include <rxrpc/packet.h>
18 #include "internal.h"
19 #include "afs_cm.h"
20 
21 struct socket *afs_socket; /* my RxRPC socket */
22 static struct workqueue_struct *afs_async_calls;
23 static struct afs_call *afs_spare_incoming_call;
24 atomic_t afs_outstanding_calls;
25 
26 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
27 static int afs_wait_for_call_to_complete(struct afs_call *);
28 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
29 static void afs_process_async_call(struct work_struct *);
30 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
31 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
32 static int afs_deliver_cm_op_id(struct afs_call *);
33 
34 /* asynchronous incoming call initial processing */
35 static const struct afs_call_type afs_RXCMxxxx = {
36 	.name		= "CB.xxxx",
37 	.deliver	= afs_deliver_cm_op_id,
38 	.abort_to_error	= afs_abort_to_error,
39 };
40 
41 static void afs_charge_preallocation(struct work_struct *);
42 
43 static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);
44 
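/*
 * Action function for wait_on_atomic_t(): called by afs_close_socket() to
 * wait while afs_outstanding_calls is non-zero.  It just yields the CPU and
 * returns 0 so that the wait continues until the counter reaches zero.
 */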
45 static int afs_wait_atomic_t(atomic_t *p)
46 {
47 	schedule();
48 	return 0;
49 }
50 
51 /*
52  * open an RxRPC socket and bind it to be a server for callback notifications
53  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
54  */
55 int afs_open_socket(void)
56 {
57 	struct sockaddr_rxrpc srx;
58 	struct socket *socket;
59 	int ret;
60 
61 	_enter("");
62 
63 	ret = -ENOMEM;
64 	afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
65 	if (!afs_async_calls)
66 		goto error_0;
67 
68 	ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
69 	if (ret < 0)
70 		goto error_1;
71 
72 	socket->sk->sk_allocation = GFP_NOFS;
73 
74 	/* bind the callback manager's address to make this a server socket */
75 	srx.srx_family			= AF_RXRPC;
76 	srx.srx_service			= CM_SERVICE;
77 	srx.transport_type		= SOCK_DGRAM;
78 	srx.transport_len		= sizeof(srx.transport.sin);
79 	srx.transport.sin.sin_family	= AF_INET;
80 	srx.transport.sin.sin_port	= htons(AFS_CM_PORT);
81 	memset(&srx.transport.sin.sin_addr, 0,
82 	       sizeof(srx.transport.sin.sin_addr));
83 
84 	ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
85 	if (ret < 0)
86 		goto error_2;
87 
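	/* Hook up the new-call notifications before listening: afs_rx_new_call
	 * queues work to recharge the preallocated call pool each time an
	 * incoming call consumes a slot, and afs_rx_discard_new_call releases
	 * preallocated calls that get thrown away at shutdown.
	 */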
88 	rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
89 					   afs_rx_discard_new_call);
90 
91 	ret = kernel_listen(socket, INT_MAX);
92 	if (ret < 0)
93 		goto error_2;
94 
95 	afs_socket = socket;
96 	afs_charge_preallocation(NULL);
97 	_leave(" = 0");
98 	return 0;
99 
100 error_2:
101 	sock_release(socket);
102 error_1:
103 	destroy_workqueue(afs_async_calls);
104 error_0:
105 	_leave(" = %d", ret);
106 	return ret;
107 }
108 
109 /*
110  * close the RxRPC socket AFS was using
111  */
112 void afs_close_socket(void)
113 {
114 	_enter("");
115 
116 	kernel_listen(afs_socket, 0);
117 	flush_workqueue(afs_async_calls);
118 
119 	if (afs_spare_incoming_call) {
120 		afs_put_call(afs_spare_incoming_call);
121 		afs_spare_incoming_call = NULL;
122 	}
123 
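	/* afs_put_call() decrements afs_outstanding_calls as calls are freed
	 * and wakes this waiter once the count reaches zero.
	 */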
124 	_debug("outstanding %u", atomic_read(&afs_outstanding_calls));
125 	wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
126 			 TASK_UNINTERRUPTIBLE);
127 	_debug("no outstanding calls");
128 
129 	kernel_sock_shutdown(afs_socket, SHUT_RDWR);
130 	flush_workqueue(afs_async_calls);
131 	sock_release(afs_socket);
132 
133 	_debug("dework");
134 	destroy_workqueue(afs_async_calls);
135 	_leave("");
136 }
137 
138 /*
139  * Allocate a call.
140  */
141 static struct afs_call *afs_alloc_call(const struct afs_call_type *type,
142 				       gfp_t gfp)
143 {
144 	struct afs_call *call;
145 	int o;
146 
147 	call = kzalloc(sizeof(*call), gfp);
148 	if (!call)
149 		return NULL;
150 
151 	call->type = type;
152 	atomic_set(&call->usage, 1);
153 	INIT_WORK(&call->async_work, afs_process_async_call);
154 	init_waitqueue_head(&call->waitq);
155 
156 	o = atomic_inc_return(&afs_outstanding_calls);
157 	trace_afs_call(call, afs_call_trace_alloc, 1, o,
158 		       __builtin_return_address(0));
159 	return call;
160 }
161 
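/*
 * Note on reference counting: afs_alloc_call() returns a call with a usage
 * count of one; extra references are taken for queued work items and async
 * notifications, and the global afs_outstanding_calls counter lets
 * afs_close_socket() wait for all calls to be torn down.
 */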
162 /*
163  * Dispose of a reference on a call.
164  */
165 void afs_put_call(struct afs_call *call)
166 {
167 	int n = atomic_dec_return(&call->usage);
168 	int o = atomic_read(&afs_outstanding_calls);
169 
170 	trace_afs_call(call, afs_call_trace_put, n + 1, o,
171 		       __builtin_return_address(0));
172 
173 	ASSERTCMP(n, >=, 0);
174 	if (n == 0) {
175 		ASSERT(!work_pending(&call->async_work));
176 		ASSERT(call->type->name != NULL);
177 
178 		if (call->rxcall) {
179 			rxrpc_kernel_end_call(afs_socket, call->rxcall);
180 			call->rxcall = NULL;
181 		}
182 		if (call->type->destructor)
183 			call->type->destructor(call);
184 
185 		kfree(call->request);
186 		kfree(call);
187 
188 		o = atomic_dec_return(&afs_outstanding_calls);
189 		trace_afs_call(call, afs_call_trace_free, 0, o,
190 			       __builtin_return_address(0));
191 		if (o == 0)
192 			wake_up_atomic_t(&afs_outstanding_calls);
193 	}
194 }
195 
196 /*
197  * Queue the call for actual work.  Returns 0 unconditionally for convenience.
198  */
199 int afs_queue_call_work(struct afs_call *call)
200 {
201 	int u = atomic_inc_return(&call->usage);
202 
203 	trace_afs_call(call, afs_call_trace_work, u,
204 		       atomic_read(&afs_outstanding_calls),
205 		       __builtin_return_address(0));
206 
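	/* The reference taken above is owned by the work item; if the work was
	 * already queued, queue_work() returns false and the extra reference
	 * is dropped again.
	 */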
207 	INIT_WORK(&call->work, call->type->work);
208 
209 	if (!queue_work(afs_wq, &call->work))
210 		afs_put_call(call);
211 	return 0;
212 }
213 
214 /*
215  * allocate a call with flat request and reply buffers
216  */
217 struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
218 				     size_t request_size, size_t reply_max)
219 {
220 	struct afs_call *call;
221 
222 	call = afs_alloc_call(type, GFP_NOFS);
223 	if (!call)
224 		goto nomem_call;
225 
226 	if (request_size) {
227 		call->request_size = request_size;
228 		call->request = kmalloc(request_size, GFP_NOFS);
229 		if (!call->request)
230 			goto nomem_free;
231 	}
232 
233 	if (reply_max) {
234 		call->reply_max = reply_max;
235 		call->buffer = kmalloc(reply_max, GFP_NOFS);
236 		if (!call->buffer)
237 			goto nomem_free;
238 	}
239 
240 	init_waitqueue_head(&call->waitq);
241 	return call;
242 
243 nomem_free:
244 	afs_put_call(call);
245 nomem_call:
246 	return NULL;
247 }
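
/*
 * Illustrative sketch (not from this file): a synchronous RPC wrapper built
 * on the helpers above would typically allocate a flat call, marshal its
 * request into call->request and then start it with afs_make_call().  The
 * call type, opcode, service constants and buffer sizes here are
 * hypothetical:
 *
 *	call = afs_alloc_flat_call(&afs_RXFSExample, 3 * 4, 256);
 *	if (!call)
 *		return -ENOMEM;
 *	call->key = key;
 *	call->service_id = FS_SERVICE;
 *	call->port = htons(AFS_FS_PORT);
 *	bp = call->request;
 *	*bp++ = htonl(EXAMPLE_OPCODE);
 *	...
 *	return afs_make_call(&server_addr, call, GFP_NOFS, false);
 */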
248 
249 /*
250  * clean up a call with flat buffer
251  */
252 void afs_flat_call_destructor(struct afs_call *call)
253 {
254 	_enter("");
255 
256 	kfree(call->request);
257 	call->request = NULL;
258 	kfree(call->buffer);
259 	call->buffer = NULL;
260 }
261 
262 /*
263  * attach the data from a bunch of pages on an inode to a call
264  */
265 static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
266 			  struct kvec *iov)
267 {
268 	struct page *pages[8];
269 	unsigned count, n, loop, offset, to;
270 	pgoff_t first = call->first, last = call->last;
271 	int ret;
272 
273 	_enter("");
274 
275 	offset = call->first_offset;
276 	call->first_offset = 0;
277 
278 	do {
279 		_debug("attach %lx-%lx", first, last);
280 
281 		count = last - first + 1;
282 		if (count > ARRAY_SIZE(pages))
283 			count = ARRAY_SIZE(pages);
284 		n = find_get_pages_contig(call->mapping, first, count, pages);
285 		ASSERTCMP(n, ==, count);
286 
287 		loop = 0;
288 		do {
289 			msg->msg_flags = 0;
290 			to = PAGE_SIZE;
291 			if (first + loop >= last)
292 				to = call->last_to;
293 			else
294 				msg->msg_flags = MSG_MORE;
295 			iov->iov_base = kmap(pages[loop]) + offset;
296 			iov->iov_len = to - offset;
297 			offset = 0;
298 
299 			_debug("- range %u-%u%s",
300 			       offset, to, msg->msg_flags ? " [more]" : "");
301 			iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
302 				      iov, 1, to - offset);
303 
304 			/* have to change the state *before* sending the last
305 			 * packet as RxRPC might give us the reply before it
306 			 * returns from sending the request */
307 			if (first + loop >= last)
308 				call->state = AFS_CALL_AWAIT_REPLY;
309 			ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
310 						     msg, to - offset);
311 			kunmap(pages[loop]);
312 			if (ret < 0)
313 				break;
314 		} while (++loop < count);
315 		first += count;
316 
317 		for (loop = 0; loop < count; loop++)
318 			put_page(pages[loop]);
319 		if (ret < 0)
320 			break;
321 	} while (first <= last);
322 
323 	_leave(" = %d", ret);
324 	return ret;
325 }
326 
327 /*
328  * initiate a call
329  */
330 int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
331 		  bool async)
332 {
333 	struct sockaddr_rxrpc srx;
334 	struct rxrpc_call *rxcall;
335 	struct msghdr msg;
336 	struct kvec iov[1];
337 	int ret;
338 
339 	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
340 
341 	ASSERT(call->type != NULL);
342 	ASSERT(call->type->name != NULL);
343 
344 	_debug("____MAKE %p{%s,%x} [%d]____",
345 	       call, call->type->name, key_serial(call->key),
346 	       atomic_read(&afs_outstanding_calls));
347 
348 	call->async = async;
349 
350 	memset(&srx, 0, sizeof(srx));
351 	srx.srx_family = AF_RXRPC;
352 	srx.srx_service = call->service_id;
353 	srx.transport_type = SOCK_DGRAM;
354 	srx.transport_len = sizeof(srx.transport.sin);
355 	srx.transport.sin.sin_family = AF_INET;
356 	srx.transport.sin.sin_port = call->port;
357 	memcpy(&srx.transport.sin.sin_addr, addr, 4);
358 
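	/* The afs_call pointer is passed to rxrpc as the user call ID; the
	 * wake-up notifications cast it back to find the call again.
	 */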
359 	/* create a call */
360 	rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
361 					 (unsigned long) call, gfp,
362 					 (async ?
363 					  afs_wake_up_async_call :
364 					  afs_wake_up_call_waiter));
365 	call->key = NULL;
366 	if (IS_ERR(rxcall)) {
367 		ret = PTR_ERR(rxcall);
368 		goto error_kill_call;
369 	}
370 
371 	call->rxcall = rxcall;
372 
373 	/* send the request */
374 	iov[0].iov_base	= call->request;
375 	iov[0].iov_len	= call->request_size;
376 
377 	msg.msg_name		= NULL;
378 	msg.msg_namelen		= 0;
379 	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
380 		      call->request_size);
381 	msg.msg_control		= NULL;
382 	msg.msg_controllen	= 0;
383 	msg.msg_flags		= (call->send_pages ? MSG_MORE : 0);
384 
385 	/* have to change the state *before* sending the last packet as RxRPC
386 	 * might give us the reply before it returns from sending the
387 	 * request */
388 	if (!call->send_pages)
389 		call->state = AFS_CALL_AWAIT_REPLY;
390 	ret = rxrpc_kernel_send_data(afs_socket, rxcall,
391 				     &msg, call->request_size);
392 	if (ret < 0)
393 		goto error_do_abort;
394 
395 	if (call->send_pages) {
396 		ret = afs_send_pages(call, &msg, iov);
397 		if (ret < 0)
398 			goto error_do_abort;
399 	}
400 
401 	/* at this point, an async call may no longer exist as it may have
402 	 * already completed */
403 	if (call->async)
404 		return -EINPROGRESS;
405 
406 	return afs_wait_for_call_to_complete(call);
407 
408 error_do_abort:
409 	rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
410 error_kill_call:
411 	afs_put_call(call);
412 	_leave(" = %d", ret);
413 	return ret;
414 }
415 
416 /*
417  * deliver messages to a call
418  */
419 static void afs_deliver_to_call(struct afs_call *call)
420 {
421 	u32 abort_code;
422 	int ret;
423 
424 	_enter("%s", call->type->name);
425 
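	/* Run the call's deliver routine until the call either completes,
	 * needs more data (-EINPROGRESS/-EAGAIN) or fails, in which case the
	 * call is aborted and marked complete.
	 */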
426 	while (call->state == AFS_CALL_AWAIT_REPLY ||
427 	       call->state == AFS_CALL_AWAIT_OP_ID ||
428 	       call->state == AFS_CALL_AWAIT_REQUEST ||
429 	       call->state == AFS_CALL_AWAIT_ACK
430 	       ) {
431 		if (call->state == AFS_CALL_AWAIT_ACK) {
432 			size_t offset = 0;
433 			ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
434 						     NULL, 0, &offset, false,
435 						     &call->abort_code);
436 			trace_afs_recv_data(call, 0, offset, false, ret);
437 
438 			if (ret == -EINPROGRESS || ret == -EAGAIN)
439 				return;
440 			if (ret == 1 || ret < 0) {
441 				call->state = AFS_CALL_COMPLETE;
442 				goto done;
443 			}
444 			return;
445 		}
446 
447 		ret = call->type->deliver(call);
448 		switch (ret) {
449 		case 0:
450 			if (call->state == AFS_CALL_AWAIT_REPLY)
451 				call->state = AFS_CALL_COMPLETE;
452 			goto done;
453 		case -EINPROGRESS:
454 		case -EAGAIN:
455 			goto out;
456 		case -ENOTCONN:
457 			abort_code = RX_CALL_DEAD;
458 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
459 						abort_code, -ret, "KNC");
460 			goto do_abort;
461 		case -ENOTSUPP:
462 			abort_code = RX_INVALID_OPERATION;
463 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
464 						abort_code, -ret, "KIV");
465 			goto do_abort;
466 		case -ENODATA:
467 		case -EBADMSG:
468 		case -EMSGSIZE:
469 		default:
470 			abort_code = RXGEN_CC_UNMARSHAL;
471 			if (call->state != AFS_CALL_AWAIT_REPLY)
472 				abort_code = RXGEN_SS_UNMARSHAL;
473 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
474 						abort_code, EBADMSG, "KUM");
475 			goto do_abort;
476 		}
477 	}
478 
479 done:
480 	if (call->state == AFS_CALL_COMPLETE && call->incoming)
481 		afs_put_call(call);
482 out:
483 	_leave("");
484 	return;
485 
486 do_abort:
487 	call->error = ret;
488 	call->state = AFS_CALL_COMPLETE;
489 	goto done;
490 }
491 
492 /*
493  * wait synchronously for a call to complete
494  */
495 static int afs_wait_for_call_to_complete(struct afs_call *call)
496 {
497 	const char *abort_why;
498 	int ret;
499 
500 	DECLARE_WAITQUEUE(myself, current);
501 
502 	_enter("");
503 
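	/* The waiting thread performs the delivery work itself whenever rxrpc
	 * flags the call as needing attention, rather than deferring it to
	 * the async workqueue.
	 */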
504 	add_wait_queue(&call->waitq, &myself);
505 	for (;;) {
506 		set_current_state(TASK_INTERRUPTIBLE);
507 
508 		/* deliver any messages that are in the queue */
509 		if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
510 			call->need_attention = false;
511 			__set_current_state(TASK_RUNNING);
512 			afs_deliver_to_call(call);
513 			continue;
514 		}
515 
516 		abort_why = "KWC";
517 		ret = call->error;
518 		if (call->state == AFS_CALL_COMPLETE)
519 			break;
520 		abort_why = "KWI";
521 		ret = -EINTR;
522 		if (signal_pending(current))
523 			break;
524 		schedule();
525 	}
526 
527 	remove_wait_queue(&call->waitq, &myself);
528 	__set_current_state(TASK_RUNNING);
529 
530 	/* kill the call */
531 	if (call->state < AFS_CALL_COMPLETE) {
532 		_debug("call incomplete");
533 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
534 					RX_CALL_DEAD, -ret, abort_why);
535 	}
536 
537 	_debug("call complete");
538 	afs_put_call(call);
539 	_leave(" = %d", ret);
540 	return ret;
541 }
542 
543 /*
544  * wake up a waiting call
545  */
546 static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
547 				    unsigned long call_user_ID)
548 {
549 	struct afs_call *call = (struct afs_call *)call_user_ID;
550 
551 	call->need_attention = true;
552 	wake_up(&call->waitq);
553 }
554 
555 /*
556  * wake up an asynchronous call
557  */
558 static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
559 				   unsigned long call_user_ID)
560 {
561 	struct afs_call *call = (struct afs_call *)call_user_ID;
562 	int u;
563 
564 	trace_afs_notify_call(rxcall, call);
565 	call->need_attention = true;
566 
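	/* Only take a reference if the usage count hasn't already reached
	 * zero, so that a call being torn down isn't resurrected; the
	 * reference is passed to the queued work item.
	 */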
567 	u = __atomic_add_unless(&call->usage, 1, 0);
568 	if (u != 0) {
569 		trace_afs_call(call, afs_call_trace_wake, u,
570 			       atomic_read(&afs_outstanding_calls),
571 			       __builtin_return_address(0));
572 
573 		if (!queue_work(afs_async_calls, &call->async_work))
574 			afs_put_call(call);
575 	}
576 }
577 
578 /*
579  * Delete an asynchronous call.  The work item carries a ref to the call struct
580  * that we need to release.
581  */
582 static void afs_delete_async_call(struct work_struct *work)
583 {
584 	struct afs_call *call = container_of(work, struct afs_call, async_work);
585 
586 	_enter("");
587 
588 	afs_put_call(call);
589 
590 	_leave("");
591 }
592 
593 /*
594  * Perform I/O processing on an asynchronous call.  The work item carries a ref
595  * to the call struct that we either need to release or to pass on.
596  */
597 static void afs_process_async_call(struct work_struct *work)
598 {
599 	struct afs_call *call = container_of(work, struct afs_call, async_work);
600 
601 	_enter("");
602 
603 	if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
604 		call->need_attention = false;
605 		afs_deliver_to_call(call);
606 	}
607 
608 	if (call->state == AFS_CALL_COMPLETE) {
609 		call->reply = NULL;
610 
611 		/* We have two refs to release - one from the alloc and one
612 		 * queued with the work item - and we can't just deallocate the
613 		 * call because the work item may be queued again.
614 		 */
615 		call->async_work.func = afs_delete_async_call;
616 		if (!queue_work(afs_async_calls, &call->async_work))
617 			afs_put_call(call);
618 	}
619 
620 	afs_put_call(call);
621 	_leave("");
622 }
623 
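/*
 * Attach an incoming rxrpc call to the preallocated afs_call that was charged
 * for it.
 */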
624 static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
625 {
626 	struct afs_call *call = (struct afs_call *)user_call_ID;
627 
628 	call->rxcall = rxcall;
629 }
630 
631 /*
632  * Charge the incoming call preallocation.
633  */
634 static void afs_charge_preallocation(struct work_struct *work)
635 {
636 	struct afs_call *call = afs_spare_incoming_call;
637 
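	/* Keep pushing preallocated call slots into rxrpc until it declines to
	 * take any more; at most one spare afs_call is kept cached for the
	 * next pass.
	 */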
638 	for (;;) {
639 		if (!call) {
640 			call = afs_alloc_call(&afs_RXCMxxxx, GFP_KERNEL);
641 			if (!call)
642 				break;
643 
644 			call->async = true;
645 			call->state = AFS_CALL_AWAIT_OP_ID;
646 			init_waitqueue_head(&call->waitq);
647 		}
648 
649 		if (rxrpc_kernel_charge_accept(afs_socket,
650 					       afs_wake_up_async_call,
651 					       afs_rx_attach,
652 					       (unsigned long)call,
653 					       GFP_KERNEL) < 0)
654 			break;
655 		call = NULL;
656 	}
657 	afs_spare_incoming_call = call;
658 }
659 
660 /*
661  * Discard a preallocated call when a socket is shut down.
662  */
663 static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
664 				    unsigned long user_call_ID)
665 {
666 	struct afs_call *call = (struct afs_call *)user_call_ID;
667 
668 	call->rxcall = NULL;
669 	afs_put_call(call);
670 }
671 
672 /*
673  * Notification of an incoming call.
674  */
675 static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
676 			    unsigned long user_call_ID)
677 {
678 	queue_work(afs_wq, &afs_charge_preallocation_work);
679 }
680 
681 /*
682  * Grab the operation ID from an incoming cache manager call.  The socket
683  * buffer is discarded on error or if we don't yet have sufficient data.
684  */
685 static int afs_deliver_cm_op_id(struct afs_call *call)
686 {
687 	int ret;
688 
689 	_enter("{%zu}", call->offset);
690 
691 	ASSERTCMP(call->offset, <, 4);
692 
693 	/* the operation ID forms the first four bytes of the request data */
694 	ret = afs_extract_data(call, &call->tmp, 4, true);
695 	if (ret < 0)
696 		return ret;
697 
698 	call->operation_ID = ntohl(call->tmp);
699 	call->state = AFS_CALL_AWAIT_REQUEST;
700 	call->offset = 0;
701 
702 	/* ask the cache manager to route the call (it'll change the call type
703 	 * if successful) */
704 	if (!afs_cm_incoming_call(call))
705 		return -ENOTSUPP;
706 
707 	trace_afs_cb_call(call);
708 
709 	/* pass responsibility for the remainder of this message off to the
710 	 * cache manager op */
711 	return call->type->deliver(call);
712 }
713 
714 /*
715  * send an empty reply
716  */
717 void afs_send_empty_reply(struct afs_call *call)
718 {
719 	struct msghdr msg;
720 
721 	_enter("");
722 
723 	msg.msg_name		= NULL;
724 	msg.msg_namelen		= 0;
725 	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
726 	msg.msg_control		= NULL;
727 	msg.msg_controllen	= 0;
728 	msg.msg_flags		= 0;
729 
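	/* Switch to AFS_CALL_AWAIT_ACK before sending: once the reply has gone
	 * out, afs_deliver_to_call() uses a zero-length recv to collect the
	 * final ACK and complete the call.
	 */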
730 	call->state = AFS_CALL_AWAIT_ACK;
731 	switch (rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, 0)) {
732 	case 0:
733 		_leave(" [replied]");
734 		return;
735 
736 	case -ENOMEM:
737 		_debug("oom");
738 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
739 					RX_USER_ABORT, ENOMEM, "KOO");
740 	default:
741 		_leave(" [error]");
742 		return;
743 	}
744 }
745 
746 /*
747  * send a simple reply
748  */
749 void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
750 {
751 	struct msghdr msg;
752 	struct kvec iov[1];
753 	int n;
754 
755 	_enter("");
756 
757 	iov[0].iov_base		= (void *) buf;
758 	iov[0].iov_len		= len;
759 	msg.msg_name		= NULL;
760 	msg.msg_namelen		= 0;
761 	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
762 	msg.msg_control		= NULL;
763 	msg.msg_controllen	= 0;
764 	msg.msg_flags		= 0;
765 
766 	call->state = AFS_CALL_AWAIT_ACK;
767 	n = rxrpc_kernel_send_data(afs_socket, call->rxcall, &msg, len);
768 	if (n >= 0) {
769 		/* Success */
770 		_leave(" [replied]");
771 		return;
772 	}
773 
774 	if (n == -ENOMEM) {
775 		_debug("oom");
776 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
777 					RX_USER_ABORT, ENOMEM, "KOO");
778 	}
779 	_leave(" [error]");
780 }
781 
782 /*
783  * Extract a piece of data from the received data socket buffers.
784  */
785 int afs_extract_data(struct afs_call *call, void *buf, size_t count,
786 		     bool want_more)
787 {
788 	int ret;
789 
790 	_enter("{%s,%zu},,%zu,%d",
791 	       call->type->name, call->offset, count, want_more);
792 
793 	ASSERTCMP(call->offset, <=, count);
794 
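	/* As used here, rxrpc_kernel_recv_data() returns 0 if the requested
	 * amount was extracted and more data remains, 1 if the requested
	 * amount was extracted and the data is now complete, -EAGAIN if more
	 * packets are needed and a negative error otherwise.
	 */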
795 	ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
796 				     buf, count, &call->offset,
797 				     want_more, &call->abort_code);
798 	trace_afs_recv_data(call, count, call->offset, want_more, ret);
799 	if (ret == 0 || ret == -EAGAIN)
800 		return ret;
801 
802 	if (ret == 1) {
803 		switch (call->state) {
804 		case AFS_CALL_AWAIT_REPLY:
805 			call->state = AFS_CALL_COMPLETE;
806 			break;
807 		case AFS_CALL_AWAIT_REQUEST:
808 			call->state = AFS_CALL_REPLYING;
809 			break;
810 		default:
811 			break;
812 		}
813 		return 0;
814 	}
815 
816 	if (ret == -ECONNABORTED)
817 		call->error = call->type->abort_to_error(call->abort_code);
818 	else
819 		call->error = ret;
820 	call->state = AFS_CALL_COMPLETE;
821 	return ret;
822 }
823