xref: /openbmc/linux/net/rxrpc/sendmsg.c (revision 4da722ca)
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

struct rxrpc_send_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (call->tx_top - call->tx_hard_ack <
		    min_t(unsigned int, call->tx_winsize,
			  call->cong_cwnd + call->cong_extra))
			break;
		if (call->state >= RXRPC_CALL_COMPLETE) {
			ret = -call->error;
			break;
		}
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		mutex_unlock(&call->user_mutex);
		*timeo = schedule_timeout(*timeo);
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(*timeo);
			break;
		}
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}

/*
 * Queue a DATA packet for transmission, set the resend timeout and send the
 * packet immediately
 */
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       bool last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last)
		annotation |= RXRPC_TX_ANNO_LAST;

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_tx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			call->ack_at = call->expire_at;
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
					  ktime_get_real());
			if (!last)
				break;
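			/* Fall through */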
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		ktime_t now = ktime_get_real(), resend_at;

		resend_at = ktime_add_ms(now, rxrpc_resend_timeout);

		if (ktime_before(resend_at, call->resend_at)) {
			call->resend_at = resend_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
		}
	}

	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave("");
}

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	if (call->tx_total_len != -1) {
		if (len > call->tx_total_len)
			return -EMSGSIZE;
		if (!more && len != call->tx_total_len)
			return -EMSGSIZE;
	}

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_tx_seen);

	copied = 0;
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false);

		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			if (call->tx_top - call->tx_hard_ack >=
			    min_t(unsigned int, call->tx_winsize,
				  call->cong_cwnd + call->cong_extra)) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			max = RXRPC_JUMBO_DATALEN;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->security_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb, rxrpc_skb_tx_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->security_size);
			skb_reserve(skb, call->conn->security_size);
			skb->len += call->conn->security_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			       skb_headroom(skb),
			       skb_tailroom(skb),
			       skb_headlen(skb),
			       sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state == RXRPC_CALL_COMPLETE)
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					skb_put_zero(skb, pad);
			}

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = conn->security->secure_packet(
				call, skb, skb->mark, skb->head);
			if (ret < 0)
				goto out;

			rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave(" = %d", -call->error);
	return -call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->tx_total_len < 0)
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
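
/*
 * Illustrative userspace sketch (not part of this file or of the kernel
 * build): one way a client might format the control buffer that
 * rxrpc_sendmsg_cmsg() parses, attaching the mandatory user call ID and an
 * optional total Tx length to a data sendmsg().  The helper name and layout
 * are assumptions for illustration; SOL_RXRPC and the RXRPC_* cmsg types come
 * from the AF_RXRPC UAPI definitions and may need to be supplied manually if
 * the libc headers lack them.  A brand-new client call would additionally
 * need msg_name pointing at a struct sockaddr_rxrpc.
 */
#if 0
#include <sys/socket.h>
#include <linux/rxrpc.h>
#include <string.h>

static ssize_t example_send_data(int fd, unsigned long call_id,
				 long long tx_total_len,
				 const void *buf, size_t len, int more)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	unsigned char control[CMSG_SPACE(sizeof(call_id)) +
			      CMSG_SPACE(sizeof(tx_total_len))];
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	memset(control, 0, sizeof(control));

	/* RXRPC_USER_CALL_ID is mandatory: it identifies (or creates) the call. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	/* RXRPC_TX_LENGTH is optional and may only be given once per call. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_TX_LENGTH;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(tx_total_len));
	memcpy(CMSG_DATA(cmsg), &tx_total_len, sizeof(tx_total_len));

	return sendmsg(fd, &msg, more ? MSG_MORE : 0);
}
#endif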

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
				     p->tx_total_len, GFP_KERNEL);
	/* The socket is now unlocked */

	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
	int ret;

	struct rxrpc_send_params p = {
		.tx_total_len	= -1,
		.user_call_ID	= 0,
		.abort_code	= 0,
		.command	= RXRPC_CMD_SEND_DATA,
		.exclusive	= false,
		.upgrade	= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
		/* The socket is now unlocked. */
		if (IS_ERR(call))
			return PTR_ERR(call);
		rxrpc_put_call(call, rxrpc_call_put);
		return 0;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
		case RXRPC_CALL_SERVER_ACCEPTING:
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto error_put;
			call->tx_total_len = p.tx_total_len;
		}
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (rxrpc_is_client_call(call) &&
		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else if (rxrpc_is_service_call(call) &&
		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Reply phase not begun or not complete for service call. */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len);
	}

	mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}
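
/*
 * Illustrative userspace sketch (not compiled here, using the same headers as
 * the previous sketch): requesting an abort of an in-progress call is just a
 * sendmsg() carrying RXRPC_USER_CALL_ID plus an RXRPC_ABORT control message
 * and no payload.  The helper name is an assumption for illustration.
 */
#if 0
static ssize_t example_abort_call(int fd, unsigned long call_id,
				  unsigned int abort_code)
{
	unsigned char control[CMSG_SPACE(sizeof(call_id)) +
			      CMSG_SPACE(sizeof(abort_code))];
	struct msghdr msg = {
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	memset(control, 0, sizeof(control));

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_ABORT;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(abort_code));
	/* The abort code must be non-zero or sendmsg() returns -EINVAL. */
	memcpy(CMSG_DATA(cmsg), &abort_code, sizeof(abort_code));

	return sendmsg(fd, &msg, 0);
}
#endif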

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
	case RXRPC_CALL_SERVER_SEND_REPLY:
		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
		break;
	case RXRPC_CALL_COMPLETE:
		read_lock_bh(&call->state_lock);
		ret = -call->error;
		read_unlock_bh(&call->state_lock);
		break;
	default:
		/* Request phase complete for this client call */
		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
		ret = -EPROTO;
		break;
	}

	mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);
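
/*
 * Illustrative in-kernel sketch (not compiled here): how a kernel service
 * such as a filesystem client might push a fully-assembled request buffer
 * into a call.  The helper name is an assumption, and the exact
 * iov_iter_kvec() signature varies between kernel versions.
 */
#if 0
static int example_kernel_send(struct socket *rxrpc_sock,
			       struct rxrpc_call *call,
			       void *request, size_t size)
{
	struct msghdr msg = {};	/* msg_name and msg_control must stay NULL */
	struct kvec iov = { .iov_base = request, .iov_len = size };

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
	msg.msg_flags = 0;	/* no MSG_MORE: this buffer ends the request */

	return rxrpc_kernel_send_data(rxrpc_sock, call, &msg, size);
}
#endif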

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: 3-char string indicating why.
 *
 * Allow a kernel service to abort a call if it's still in an abortable state.
 * Returns true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, const char *why)
{
	bool aborted;

	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);

	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
	if (aborted)
		rxrpc_send_abort_packet(call);

	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
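
/*
 * Illustrative in-kernel sketch (not compiled here): a kernel service giving
 * up on a call after a local failure.  This assumes the RX_USER_ABORT abort
 * code (the conventional "aborted by user" Rx wire value) is visible in the
 * caller's context; the three-character tag is free-form and only shows up
 * in traces.
 */
#if 0
static void example_abort_on_oom(struct socket *rxrpc_sock,
				 struct rxrpc_call *call)
{
	if (rxrpc_kernel_abort_call(rxrpc_sock, call, RX_USER_ABORT,
				    -ENOMEM, "OOM"))
		pr_debug("call aborted locally\n");
}
#endif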

/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use for setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
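
/*
 * Illustrative in-kernel sketch (not compiled here): a service handler that
 * knows the exact size of its reply can declare it before queuing any reply
 * data, letting rxrpc pack and encrypt the data optimally.  The helper name
 * is an assumption, and the iov_iter_kvec() signature varies between kernel
 * versions.
 */
#if 0
static int example_send_fixed_size_reply(struct socket *rxrpc_sock,
					  struct rxrpc_call *call,
					  void *reply, size_t reply_size)
{
	struct msghdr msg = {};
	struct kvec iov = { .iov_base = reply, .iov_len = reply_size };

	/* Must be done before any reply data is queued on the call. */
	rxrpc_kernel_set_tx_length(rxrpc_sock, call, reply_size);

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, reply_size);
	return rxrpc_kernel_send_data(rxrpc_sock, call, &msg, reply_size);
}
#endif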
727