xref: /openbmc/linux/io_uring/net.c (revision 3f58ff6b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10 
11 #include <uapi/linux/io_uring.h>
12 
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19 
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 	struct file			*file;
23 	int				how;
24 };
25 
26 struct io_accept {
27 	struct file			*file;
28 	struct sockaddr __user		*addr;
29 	int __user			*addr_len;
30 	int				flags;
31 	u32				file_slot;
32 	unsigned long			nofile;
33 };
34 
35 struct io_socket {
36 	struct file			*file;
37 	int				domain;
38 	int				type;
39 	int				protocol;
40 	int				flags;
41 	u32				file_slot;
42 	unsigned long			nofile;
43 };
44 
45 struct io_connect {
46 	struct file			*file;
47 	struct sockaddr __user		*addr;
48 	int				addr_len;
49 	bool				in_progress;
50 };
51 
52 struct io_sr_msg {
53 	struct file			*file;
54 	union {
55 		struct compat_msghdr __user	*umsg_compat;
56 		struct user_msghdr __user	*umsg;
57 		void __user			*buf;
58 	};
59 	unsigned			len;
60 	unsigned			done_io;
61 	unsigned			msg_flags;
62 	u16				flags;
63 	/* initialised and used only by !msg send variants */
64 	u16				addr_len;
65 	void __user			*addr;
66 	/* used only for send zerocopy */
67 	struct io_kiocb 		*notif;
68 };
69 
70 static inline bool io_check_multishot(struct io_kiocb *req,
71 				      unsigned int issue_flags)
72 {
73 	/*
74 	 * When ->task_complete is set we only allow posting CQEs from the
75 	 * original task context. Usual request completions are handled in other
76 	 * generic paths, but multishot poll may decide to post extra CQEs.
77 	 */
78 	return !(issue_flags & IO_URING_F_IOWQ) ||
79 		!(issue_flags & IO_URING_F_MULTISHOT) ||
80 		!req->ctx->task_complete;
81 }
82 
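/*
 * IORING_OP_SHUTDOWN: shutdown(2) on a socket. sqe->len carries the "how"
 * argument and the prep step rejects the SQE fields this opcode doesn't use.
 * The call itself may block, so a nonblocking issue attempt returns -EAGAIN
 * and is retried from a blocking context.
 */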
83 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
84 {
85 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
86 
87 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
88 		     sqe->buf_index || sqe->splice_fd_in))
89 		return -EINVAL;
90 
91 	shutdown->how = READ_ONCE(sqe->len);
92 	return 0;
93 }
94 
95 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
96 {
97 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
98 	struct socket *sock;
99 	int ret;
100 
101 	if (issue_flags & IO_URING_F_NONBLOCK)
102 		return -EAGAIN;
103 
104 	sock = sock_from_file(req->file);
105 	if (unlikely(!sock))
106 		return -ENOTSOCK;
107 
108 	ret = __sys_shutdown_sock(sock, shutdown->how);
109 	io_req_set_res(req, ret, 0);
110 	return IOU_OK;
111 }
112 
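/*
 * Decide whether a short transfer should be retried to satisfy MSG_WAITALL.
 * Only stream and seqpacket sockets can meaningfully resume a partial
 * send/receive; datagram sockets cannot.
 */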
113 static bool io_net_retry(struct socket *sock, int flags)
114 {
115 	if (!(flags & MSG_WAITALL))
116 		return false;
117 	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
118 }
119 
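/*
 * Try to return the request's async msghdr to the per-ring netmsg_cache so a
 * later request can reuse it. Only done when the ring lock is held (i.e. not
 * an unlocked io-wq issue); otherwise the normal cleanup path frees it.
 */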
120 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
121 {
122 	struct io_async_msghdr *hdr = req->async_data;
123 
124 	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
125 		return;
126 
127 	/* Let normal cleanup path reap it if we fail adding to the cache */
128 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
129 		req->async_data = NULL;
130 		req->flags &= ~REQ_F_ASYNC_DATA;
131 	}
132 }
133 
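/*
 * Allocate async msghdr storage for a request, preferring a cached entry from
 * ctx->netmsg_cache when the ring lock is held. Falls back to a plain
 * io_alloc_async_data() allocation, and returns NULL if that fails too.
 */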
134 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
135 						  unsigned int issue_flags)
136 {
137 	struct io_ring_ctx *ctx = req->ctx;
138 	struct io_cache_entry *entry;
139 	struct io_async_msghdr *hdr;
140 
141 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
142 		entry = io_alloc_cache_get(&ctx->netmsg_cache);
143 		if (entry) {
144 			hdr = container_of(entry, struct io_async_msghdr, cache);
145 			hdr->free_iov = NULL;
146 			req->flags |= REQ_F_ASYNC_DATA;
147 			req->async_data = hdr;
148 			return hdr;
149 		}
150 	}
151 
152 	if (!io_alloc_async_data(req)) {
153 		hdr = req->async_data;
154 		hdr->free_iov = NULL;
155 		return hdr;
156 	}
157 	return NULL;
158 }
159 
160 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
161 {
162 	/* ->prep_async is always called from the submission context */
163 	return io_msg_alloc_async(req, 0);
164 }
165 
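/*
 * Preserve the on-stack msghdr in the request's async data so the send or
 * receive can be retried later. msg_name and any fast_iov use are re-pointed
 * at the copied storage. Returns -EAGAIN to arm the retry, or -ENOMEM if the
 * async data couldn't be allocated.
 */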
166 static int io_setup_async_msg(struct io_kiocb *req,
167 			      struct io_async_msghdr *kmsg,
168 			      unsigned int issue_flags)
169 {
170 	struct io_async_msghdr *async_msg;
171 
172 	if (req_has_async_data(req))
173 		return -EAGAIN;
174 	async_msg = io_msg_alloc_async(req, issue_flags);
175 	if (!async_msg) {
176 		kfree(kmsg->free_iov);
177 		return -ENOMEM;
178 	}
179 	req->flags |= REQ_F_NEED_CLEANUP;
180 	memcpy(async_msg, kmsg, sizeof(*kmsg));
181 	if (async_msg->msg.msg_name)
182 		async_msg->msg.msg_name = &async_msg->addr;
183 	/* if we were using fast_iov, set it to the new one */
184 	if (!kmsg->free_iov) {
185 		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
186 		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
187 	}
188 
189 	return -EAGAIN;
190 }
191 
192 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
193 			       struct io_async_msghdr *iomsg)
194 {
195 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
196 
197 	iomsg->msg.msg_name = &iomsg->addr;
198 	iomsg->free_iov = iomsg->fast_iov;
199 	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
200 					&iomsg->free_iov);
201 }
202 
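/*
 * prep_async for the non-msghdr send variants: if a destination address was
 * supplied via addr2/addr_len, copy it into kernel async storage up front so
 * it stays valid once the request is punted.
 */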
203 int io_send_prep_async(struct io_kiocb *req)
204 {
205 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
206 	struct io_async_msghdr *io;
207 	int ret;
208 
209 	if (!zc->addr || req_has_async_data(req))
210 		return 0;
211 	io = io_msg_alloc_async_prep(req);
212 	if (!io)
213 		return -ENOMEM;
214 	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
215 	return ret;
216 }
217 
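/*
 * Like io_setup_async_msg(), but for the plain send paths that only need to
 * stash the already-copied destination address before returning -EAGAIN.
 */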
218 static int io_setup_async_addr(struct io_kiocb *req,
219 			      struct sockaddr_storage *addr_storage,
220 			      unsigned int issue_flags)
221 {
222 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
223 	struct io_async_msghdr *io;
224 
225 	if (!sr->addr || req_has_async_data(req))
226 		return -EAGAIN;
227 	io = io_msg_alloc_async(req, issue_flags);
228 	if (!io)
229 		return -ENOMEM;
230 	memcpy(&io->addr, addr_storage, sizeof(io->addr));
231 	return -EAGAIN;
232 }
233 
234 int io_sendmsg_prep_async(struct io_kiocb *req)
235 {
236 	int ret;
237 
238 	if (!io_msg_alloc_async_prep(req))
239 		return -ENOMEM;
240 	ret = io_sendmsg_copy_hdr(req, req->async_data);
241 	if (!ret)
242 		req->flags |= REQ_F_NEED_CLEANUP;
243 	return ret;
244 }
245 
246 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
247 {
248 	struct io_async_msghdr *io = req->async_data;
249 
250 	kfree(io->free_iov);
251 }
252 
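/*
 * Shared prep for IORING_OP_SENDMSG and IORING_OP_SEND. For SENDMSG,
 * sqe->addr points at a struct user_msghdr; for SEND it points at the data
 * buffer, with an optional destination address in addr2/addr_len. sqe->ioprio
 * carries IORING_RECVSEND_* flags, of which only POLL_FIRST is accepted here.
 */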
253 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
254 {
255 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
256 
257 	if (req->opcode == IORING_OP_SEND) {
258 		if (READ_ONCE(sqe->__pad3[0]))
259 			return -EINVAL;
260 		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
261 		sr->addr_len = READ_ONCE(sqe->addr_len);
262 	} else if (sqe->addr2 || sqe->file_index) {
263 		return -EINVAL;
264 	}
265 
266 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
267 	sr->len = READ_ONCE(sqe->len);
268 	sr->flags = READ_ONCE(sqe->ioprio);
269 	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
270 		return -EINVAL;
271 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
272 	if (sr->msg_flags & MSG_DONTWAIT)
273 		req->flags |= REQ_F_NOWAIT;
274 
275 #ifdef CONFIG_COMPAT
276 	if (req->ctx->compat)
277 		sr->msg_flags |= MSG_CMSG_COMPAT;
278 #endif
279 	sr->done_io = 0;
280 	return 0;
281 }
282 
283 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
284 {
285 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
286 	struct io_async_msghdr iomsg, *kmsg;
287 	struct socket *sock;
288 	unsigned flags;
289 	int min_ret = 0;
290 	int ret;
291 
292 	sock = sock_from_file(req->file);
293 	if (unlikely(!sock))
294 		return -ENOTSOCK;
295 
296 	if (req_has_async_data(req)) {
297 		kmsg = req->async_data;
298 	} else {
299 		ret = io_sendmsg_copy_hdr(req, &iomsg);
300 		if (ret)
301 			return ret;
302 		kmsg = &iomsg;
303 	}
304 
305 	if (!(req->flags & REQ_F_POLLED) &&
306 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
307 		return io_setup_async_msg(req, kmsg, issue_flags);
308 
309 	flags = sr->msg_flags;
310 	if (issue_flags & IO_URING_F_NONBLOCK)
311 		flags |= MSG_DONTWAIT;
312 	if (flags & MSG_WAITALL)
313 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
314 
315 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
316 
317 	if (ret < min_ret) {
318 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
319 			return io_setup_async_msg(req, kmsg, issue_flags);
320 		if (ret > 0 && io_net_retry(sock, flags)) {
321 			sr->done_io += ret;
322 			req->flags |= REQ_F_PARTIAL_IO;
323 			return io_setup_async_msg(req, kmsg, issue_flags);
324 		}
325 		if (ret == -ERESTARTSYS)
326 			ret = -EINTR;
327 		req_set_fail(req);
328 	}
329 	/* fast path, check for non-NULL to avoid function call */
330 	if (kmsg->free_iov)
331 		kfree(kmsg->free_iov);
332 	req->flags &= ~REQ_F_NEED_CLEANUP;
333 	io_netmsg_recycle(req, issue_flags);
334 	if (ret >= 0)
335 		ret += sr->done_io;
336 	else if (sr->done_io)
337 		ret = sr->done_io;
338 	io_req_set_res(req, ret, 0);
339 	return IOU_OK;
340 }
341 
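/*
 * Issue path for IORING_OP_SEND: import the single user buffer, optionally
 * attach a destination address, and call sock_sendmsg(). A short transfer
 * under MSG_WAITALL is retried per io_net_retry(), with progress tracked in
 * sr->done_io.
 */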
342 int io_send(struct io_kiocb *req, unsigned int issue_flags)
343 {
344 	struct sockaddr_storage __address;
345 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
346 	struct msghdr msg;
347 	struct iovec iov;
348 	struct socket *sock;
349 	unsigned flags;
350 	int min_ret = 0;
351 	int ret;
352 
353 	msg.msg_name = NULL;
354 	msg.msg_control = NULL;
355 	msg.msg_controllen = 0;
356 	msg.msg_namelen = 0;
357 	msg.msg_ubuf = NULL;
358 
359 	if (sr->addr) {
360 		if (req_has_async_data(req)) {
361 			struct io_async_msghdr *io = req->async_data;
362 
363 			msg.msg_name = &io->addr;
364 		} else {
365 			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
366 			if (unlikely(ret < 0))
367 				return ret;
368 			msg.msg_name = (struct sockaddr *)&__address;
369 		}
370 		msg.msg_namelen = sr->addr_len;
371 	}
372 
373 	if (!(req->flags & REQ_F_POLLED) &&
374 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
375 		return io_setup_async_addr(req, &__address, issue_flags);
376 
377 	sock = sock_from_file(req->file);
378 	if (unlikely(!sock))
379 		return -ENOTSOCK;
380 
381 	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
382 	if (unlikely(ret))
383 		return ret;
384 
385 	flags = sr->msg_flags;
386 	if (issue_flags & IO_URING_F_NONBLOCK)
387 		flags |= MSG_DONTWAIT;
388 	if (flags & MSG_WAITALL)
389 		min_ret = iov_iter_count(&msg.msg_iter);
390 
391 	msg.msg_flags = flags;
392 	ret = sock_sendmsg(sock, &msg);
393 	if (ret < min_ret) {
394 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
395 			return io_setup_async_addr(req, &__address, issue_flags);
396 
397 		if (ret > 0 && io_net_retry(sock, flags)) {
398 			sr->len -= ret;
399 			sr->buf += ret;
400 			sr->done_io += ret;
401 			req->flags |= REQ_F_PARTIAL_IO;
402 			return io_setup_async_addr(req, &__address, issue_flags);
403 		}
404 		if (ret == -ERESTARTSYS)
405 			ret = -EINTR;
406 		req_set_fail(req);
407 	}
408 	if (ret >= 0)
409 		ret += sr->done_io;
410 	else if (sr->done_io)
411 		ret = sr->done_io;
412 	io_req_set_res(req, ret, 0);
413 	return IOU_OK;
414 }
415 
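/*
 * For multishot recvmsg the name and control lengths are placed in front of
 * the payload in the provided buffer; make sure their sum (plus the
 * io_uring_recvmsg_out header) does not overflow an int.
 */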
416 static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
417 {
418 	int hdr;
419 
420 	if (iomsg->namelen < 0)
421 		return true;
422 	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
423 			       iomsg->namelen, &hdr))
424 		return true;
425 	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
426 		return true;
427 
428 	return false;
429 }
430 
431 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
432 				 struct io_async_msghdr *iomsg)
433 {
434 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
435 	struct user_msghdr msg;
436 	int ret;
437 
438 	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
439 		return -EFAULT;
440 
441 	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
442 	if (ret)
443 		return ret;
444 
445 	if (req->flags & REQ_F_BUFFER_SELECT) {
446 		if (msg.msg_iovlen == 0) {
447 			sr->len = iomsg->fast_iov[0].iov_len = 0;
448 			iomsg->fast_iov[0].iov_base = NULL;
449 			iomsg->free_iov = NULL;
450 		} else if (msg.msg_iovlen > 1) {
451 			return -EINVAL;
452 		} else {
453 			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
454 				return -EFAULT;
455 			sr->len = iomsg->fast_iov[0].iov_len;
456 			iomsg->free_iov = NULL;
457 		}
458 
459 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
460 			iomsg->namelen = msg.msg_namelen;
461 			iomsg->controllen = msg.msg_controllen;
462 			if (io_recvmsg_multishot_overflow(iomsg))
463 				return -EOVERFLOW;
464 		}
465 	} else {
466 		iomsg->free_iov = iomsg->fast_iov;
467 		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
468 				     &iomsg->free_iov, &iomsg->msg.msg_iter,
469 				     false);
470 		if (ret > 0)
471 			ret = 0;
472 	}
473 
474 	return ret;
475 }
476 
477 #ifdef CONFIG_COMPAT
478 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
479 					struct io_async_msghdr *iomsg)
480 {
481 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
482 	struct compat_msghdr msg;
483 	struct compat_iovec __user *uiov;
484 	int ret;
485 
486 	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
487 		return -EFAULT;
488 
489 	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
490 	if (ret)
491 		return ret;
492 
493 	uiov = compat_ptr(msg.msg_iov);
494 	if (req->flags & REQ_F_BUFFER_SELECT) {
495 		compat_ssize_t clen;
496 
497 		iomsg->free_iov = NULL;
498 		if (msg.msg_iovlen == 0) {
499 			sr->len = 0;
500 		} else if (msg.msg_iovlen > 1) {
501 			return -EINVAL;
502 		} else {
503 			if (!access_ok(uiov, sizeof(*uiov)))
504 				return -EFAULT;
505 			if (__get_user(clen, &uiov->iov_len))
506 				return -EFAULT;
507 			if (clen < 0)
508 				return -EINVAL;
509 			sr->len = clen;
510 		}
511 
512 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
513 			iomsg->namelen = msg.msg_namelen;
514 			iomsg->controllen = msg.msg_controllen;
515 			if (io_recvmsg_multishot_overflow(iomsg))
516 				return -EOVERFLOW;
517 		}
518 	} else {
519 		iomsg->free_iov = iomsg->fast_iov;
520 		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
521 				   UIO_FASTIOV, &iomsg->free_iov,
522 				   &iomsg->msg.msg_iter, true);
523 		if (ret < 0)
524 			return ret;
525 	}
526 
527 	return 0;
528 }
529 #endif
530 
531 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
532 			       struct io_async_msghdr *iomsg)
533 {
534 	iomsg->msg.msg_name = &iomsg->addr;
535 
536 #ifdef CONFIG_COMPAT
537 	if (req->ctx->compat)
538 		return __io_compat_recvmsg_copy_hdr(req, iomsg);
539 #endif
540 
541 	return __io_recvmsg_copy_hdr(req, iomsg);
542 }
543 
544 int io_recvmsg_prep_async(struct io_kiocb *req)
545 {
546 	int ret;
547 
548 	if (!io_msg_alloc_async_prep(req))
549 		return -ENOMEM;
550 	ret = io_recvmsg_copy_hdr(req, req->async_data);
551 	if (!ret)
552 		req->flags |= REQ_F_NEED_CLEANUP;
553 	return ret;
554 }
555 
556 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
557 
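/*
 * Shared prep for IORING_OP_RECVMSG and IORING_OP_RECV. Multishot receive
 * requires a provided-buffer group, is incompatible with MSG_WAITALL, and for
 * IORING_OP_RECV the SQE length must be zero since the length comes from the
 * selected buffer.
 */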
558 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
559 {
560 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
561 
562 	if (unlikely(sqe->file_index || sqe->addr2))
563 		return -EINVAL;
564 
565 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
566 	sr->len = READ_ONCE(sqe->len);
567 	sr->flags = READ_ONCE(sqe->ioprio);
568 	if (sr->flags & ~(RECVMSG_FLAGS))
569 		return -EINVAL;
570 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
571 	if (sr->msg_flags & MSG_DONTWAIT)
572 		req->flags |= REQ_F_NOWAIT;
573 	if (sr->msg_flags & MSG_ERRQUEUE)
574 		req->flags |= REQ_F_CLEAR_POLLIN;
575 	if (sr->flags & IORING_RECV_MULTISHOT) {
576 		if (!(req->flags & REQ_F_BUFFER_SELECT))
577 			return -EINVAL;
578 		if (sr->msg_flags & MSG_WAITALL)
579 			return -EINVAL;
580 		if (req->opcode == IORING_OP_RECV && sr->len)
581 			return -EINVAL;
582 		req->flags |= REQ_F_APOLL_MULTISHOT;
583 	}
584 
585 #ifdef CONFIG_COMPAT
586 	if (req->ctx->compat)
587 		sr->msg_flags |= MSG_CMSG_COMPAT;
588 #endif
589 	sr->done_io = 0;
590 	return 0;
591 }
592 
593 static inline void io_recv_prep_retry(struct io_kiocb *req)
594 {
595 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
596 
597 	sr->done_io = 0;
598 	sr->len = 0; /* get from the provided buffer */
599 }
600 
601 /*
602  * Finishes io_recv and io_recvmsg.
603  *
604  * Returns true if it is actually finished, or false if it should run
605  * again (for multishot).
606  */
607 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
608 				  unsigned int cflags, bool mshot_finished,
609 				  unsigned issue_flags)
610 {
611 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
612 		io_req_set_res(req, *ret, cflags);
613 		*ret = IOU_OK;
614 		return true;
615 	}
616 
617 	if (!mshot_finished) {
618 		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
619 			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
620 			io_recv_prep_retry(req);
621 			return false;
622 		}
623 		/* Otherwise stop multishot but use the current result. */
624 	}
625 
626 	io_req_set_res(req, *ret, cflags);
627 
628 	if (issue_flags & IO_URING_F_MULTISHOT)
629 		*ret = IOU_STOP_MULTISHOT;
630 	else
631 		*ret = IOU_OK;
632 	return true;
633 }
634 
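/*
 * Carve up the selected provided buffer for multishot recvmsg: an
 * io_uring_recvmsg_out header, then room for the name and control data, with
 * the remainder used as payload. *buf and *len are advanced to the payload
 * area, and the original buffer start is stashed in sr->buf for the final
 * copy_to_user().
 */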
635 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
636 				     struct io_sr_msg *sr, void __user **buf,
637 				     size_t *len)
638 {
639 	unsigned long ubuf = (unsigned long) *buf;
640 	unsigned long hdr;
641 
642 	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
643 		kmsg->controllen;
644 	if (*len < hdr)
645 		return -EFAULT;
646 
647 	if (kmsg->controllen) {
648 		unsigned long control = ubuf + hdr - kmsg->controllen;
649 
650 		kmsg->msg.msg_control_user = (void __user *) control;
651 		kmsg->msg.msg_controllen = kmsg->controllen;
652 	}
653 
654 	sr->buf = *buf; /* stash for later copy */
655 	*buf = (void __user *) (ubuf + hdr);
656 	kmsg->payloadlen = *len = *len - hdr;
657 	return 0;
658 }
659 
660 struct io_recvmsg_multishot_hdr {
661 	struct io_uring_recvmsg_out msg;
662 	struct sockaddr_storage addr;
663 };
664 
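/*
 * One receive iteration for multishot recvmsg: receive into the payload and
 * control areas set up above, then copy an io_uring_recvmsg_out header (plus
 * any source address) to the start of the provided buffer. Returns the number
 * of buffer bytes consumed, and marks the shot finished on error or a
 * zero-sized receive.
 */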
665 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
666 				struct io_async_msghdr *kmsg,
667 				unsigned int flags, bool *finished)
668 {
669 	int err;
670 	int copy_len;
671 	struct io_recvmsg_multishot_hdr hdr;
672 
673 	if (kmsg->namelen)
674 		kmsg->msg.msg_name = &hdr.addr;
675 	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
676 	kmsg->msg.msg_namelen = 0;
677 
678 	if (sock->file->f_flags & O_NONBLOCK)
679 		flags |= MSG_DONTWAIT;
680 
681 	err = sock_recvmsg(sock, &kmsg->msg, flags);
682 	*finished = err <= 0;
683 	if (err < 0)
684 		return err;
685 
686 	hdr.msg = (struct io_uring_recvmsg_out) {
687 		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
688 		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
689 	};
690 
691 	hdr.msg.payloadlen = err;
692 	if (err > kmsg->payloadlen)
693 		err = kmsg->payloadlen;
694 
695 	copy_len = sizeof(struct io_uring_recvmsg_out);
696 	if (kmsg->msg.msg_namelen > kmsg->namelen)
697 		copy_len += kmsg->namelen;
698 	else
699 		copy_len += kmsg->msg.msg_namelen;
700 
701 	/*
702 	 *      "fromlen shall refer to the value before truncation.."
703 	 *                      1003.1g
704 	 */
705 	hdr.msg.namelen = kmsg->msg.msg_namelen;
706 
707 	/* ensure that there is no gap between hdr and sockaddr_storage */
708 	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
709 		     sizeof(struct io_uring_recvmsg_out));
710 	if (copy_to_user(io->buf, &hdr, copy_len)) {
711 		*finished = true;
712 		return -EFAULT;
713 	}
714 
715 	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
716 			kmsg->controllen + err;
717 }
718 
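/*
 * Issue path for IORING_OP_RECVMSG; io_recv() below handles IORING_OP_RECV.
 * Deals with provided-buffer selection, multishot retries via the
 * retry_multishot label, and punting to async context with the copied msghdr
 * when the receive would block.
 */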
719 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
720 {
721 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
722 	struct io_async_msghdr iomsg, *kmsg;
723 	struct socket *sock;
724 	unsigned int cflags;
725 	unsigned flags;
726 	int ret, min_ret = 0;
727 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
728 	bool mshot_finished = true;
729 
730 	sock = sock_from_file(req->file);
731 	if (unlikely(!sock))
732 		return -ENOTSOCK;
733 
734 	if (req_has_async_data(req)) {
735 		kmsg = req->async_data;
736 	} else {
737 		ret = io_recvmsg_copy_hdr(req, &iomsg);
738 		if (ret)
739 			return ret;
740 		kmsg = &iomsg;
741 	}
742 
743 	if (!(req->flags & REQ_F_POLLED) &&
744 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
745 		return io_setup_async_msg(req, kmsg, issue_flags);
746 
747 	if (!io_check_multishot(req, issue_flags))
748 		return io_setup_async_msg(req, kmsg, issue_flags);
749 
750 retry_multishot:
751 	if (io_do_buffer_select(req)) {
752 		void __user *buf;
753 		size_t len = sr->len;
754 
755 		buf = io_buffer_select(req, &len, issue_flags);
756 		if (!buf)
757 			return -ENOBUFS;
758 
759 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
760 			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
761 			if (ret) {
762 				io_kbuf_recycle(req, issue_flags);
763 				return ret;
764 			}
765 		}
766 
767 		kmsg->fast_iov[0].iov_base = buf;
768 		kmsg->fast_iov[0].iov_len = len;
769 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
770 				len);
771 	}
772 
773 	flags = sr->msg_flags;
774 	if (force_nonblock)
775 		flags |= MSG_DONTWAIT;
776 	if (flags & MSG_WAITALL)
777 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
778 
779 	kmsg->msg.msg_get_inq = 1;
780 	if (req->flags & REQ_F_APOLL_MULTISHOT)
781 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
782 					   &mshot_finished);
783 	else
784 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
785 					 kmsg->uaddr, flags);
786 
787 	if (ret < min_ret) {
788 		if (ret == -EAGAIN && force_nonblock) {
789 			ret = io_setup_async_msg(req, kmsg, issue_flags);
790 			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
791 				io_kbuf_recycle(req, issue_flags);
792 				return IOU_ISSUE_SKIP_COMPLETE;
793 			}
794 			return ret;
795 		}
796 		if (ret > 0 && io_net_retry(sock, flags)) {
797 			sr->done_io += ret;
798 			req->flags |= REQ_F_PARTIAL_IO;
799 			return io_setup_async_msg(req, kmsg, issue_flags);
800 		}
801 		if (ret == -ERESTARTSYS)
802 			ret = -EINTR;
803 		req_set_fail(req);
804 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
805 		req_set_fail(req);
806 	}
807 
808 	if (ret > 0)
809 		ret += sr->done_io;
810 	else if (sr->done_io)
811 		ret = sr->done_io;
812 	else
813 		io_kbuf_recycle(req, issue_flags);
814 
815 	cflags = io_put_kbuf(req, issue_flags);
816 	if (kmsg->msg.msg_inq)
817 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
818 
819 	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
820 		goto retry_multishot;
821 
822 	if (mshot_finished) {
823 		/* fast path, check for non-NULL to avoid function call */
824 		if (kmsg->free_iov)
825 			kfree(kmsg->free_iov);
826 		io_netmsg_recycle(req, issue_flags);
827 		req->flags &= ~REQ_F_NEED_CLEANUP;
828 	}
829 
830 	return ret;
831 }
832 
833 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
834 {
835 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
836 	struct msghdr msg;
837 	struct socket *sock;
838 	struct iovec iov;
839 	unsigned int cflags;
840 	unsigned flags;
841 	int ret, min_ret = 0;
842 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
843 	size_t len = sr->len;
844 
845 	if (!(req->flags & REQ_F_POLLED) &&
846 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
847 		return -EAGAIN;
848 
849 	if (!io_check_multishot(req, issue_flags))
850 		return -EAGAIN;
851 
852 	sock = sock_from_file(req->file);
853 	if (unlikely(!sock))
854 		return -ENOTSOCK;
855 
856 retry_multishot:
857 	if (io_do_buffer_select(req)) {
858 		void __user *buf;
859 
860 		buf = io_buffer_select(req, &len, issue_flags);
861 		if (!buf)
862 			return -ENOBUFS;
863 		sr->buf = buf;
864 	}
865 
866 	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
867 	if (unlikely(ret))
868 		goto out_free;
869 
870 	msg.msg_name = NULL;
871 	msg.msg_namelen = 0;
872 	msg.msg_control = NULL;
873 	msg.msg_get_inq = 1;
874 	msg.msg_flags = 0;
875 	msg.msg_controllen = 0;
876 	msg.msg_iocb = NULL;
877 	msg.msg_ubuf = NULL;
878 
879 	flags = sr->msg_flags;
880 	if (force_nonblock)
881 		flags |= MSG_DONTWAIT;
882 	if (flags & MSG_WAITALL)
883 		min_ret = iov_iter_count(&msg.msg_iter);
884 
885 	ret = sock_recvmsg(sock, &msg, flags);
886 	if (ret < min_ret) {
887 		if (ret == -EAGAIN && force_nonblock) {
888 			if (issue_flags & IO_URING_F_MULTISHOT) {
889 				io_kbuf_recycle(req, issue_flags);
890 				return IOU_ISSUE_SKIP_COMPLETE;
891 			}
892 
893 			return -EAGAIN;
894 		}
895 		if (ret > 0 && io_net_retry(sock, flags)) {
896 			sr->len -= ret;
897 			sr->buf += ret;
898 			sr->done_io += ret;
899 			req->flags |= REQ_F_PARTIAL_IO;
900 			return -EAGAIN;
901 		}
902 		if (ret == -ERESTARTSYS)
903 			ret = -EINTR;
904 		req_set_fail(req);
905 	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
906 out_free:
907 		req_set_fail(req);
908 	}
909 
910 	if (ret > 0)
911 		ret += sr->done_io;
912 	else if (sr->done_io)
913 		ret = sr->done_io;
914 	else
915 		io_kbuf_recycle(req, issue_flags);
916 
917 	cflags = io_put_kbuf(req, issue_flags);
918 	if (msg.msg_inq)
919 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
920 
921 	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
922 		goto retry_multishot;
923 
924 	return ret;
925 }
926 
927 void io_send_zc_cleanup(struct io_kiocb *req)
928 {
929 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
930 	struct io_async_msghdr *io;
931 
932 	if (req_has_async_data(req)) {
933 		io = req->async_data;
934 		/* might be ->fast_iov if *msg_copy_hdr failed */
935 		if (io->free_iov != io->fast_iov)
936 			kfree(io->free_iov);
937 	}
938 	if (zc->notif) {
939 		io_notif_flush(zc->notif);
940 		zc->notif = NULL;
941 	}
942 }
943 
944 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
945 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
946 
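/*
 * Prep for IORING_OP_SEND_ZC and IORING_OP_SENDMSG_ZC: allocate the
 * notification CQE carrier, decode the zerocopy flags from sqe->ioprio, and
 * resolve the registered buffer index when IORING_RECVSEND_FIXED_BUF is set
 * (only valid for SEND_ZC).
 */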
947 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
948 {
949 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
950 	struct io_ring_ctx *ctx = req->ctx;
951 	struct io_kiocb *notif;
952 
953 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
954 		return -EINVAL;
955 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
956 	if (req->flags & REQ_F_CQE_SKIP)
957 		return -EINVAL;
958 
959 	notif = zc->notif = io_alloc_notif(ctx);
960 	if (!notif)
961 		return -ENOMEM;
962 	notif->cqe.user_data = req->cqe.user_data;
963 	notif->cqe.res = 0;
964 	notif->cqe.flags = IORING_CQE_F_NOTIF;
965 	req->flags |= REQ_F_NEED_CLEANUP;
966 
967 	zc->flags = READ_ONCE(sqe->ioprio);
968 	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
969 		if (zc->flags & ~IO_ZC_FLAGS_VALID)
970 			return -EINVAL;
971 		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
972 			io_notif_set_extended(notif);
973 			io_notif_to_data(notif)->zc_report = true;
974 		}
975 	}
976 
977 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
978 		unsigned idx = READ_ONCE(sqe->buf_index);
979 
980 		if (unlikely(idx >= ctx->nr_user_bufs))
981 			return -EFAULT;
982 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
983 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
984 		io_req_set_rsrc_node(notif, ctx, 0);
985 	}
986 
987 	if (req->opcode == IORING_OP_SEND_ZC) {
988 		if (READ_ONCE(sqe->__pad3[0]))
989 			return -EINVAL;
990 		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
991 		zc->addr_len = READ_ONCE(sqe->addr_len);
992 	} else {
993 		if (unlikely(sqe->addr2 || sqe->file_index))
994 			return -EINVAL;
995 		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
996 			return -EINVAL;
997 	}
998 
999 	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1000 	zc->len = READ_ONCE(sqe->len);
1001 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1002 	if (zc->msg_flags & MSG_DONTWAIT)
1003 		req->flags |= REQ_F_NOWAIT;
1004 
1005 	zc->done_io = 0;
1006 
1007 #ifdef CONFIG_COMPAT
1008 	if (req->ctx->compat)
1009 		zc->msg_flags |= MSG_CMSG_COMPAT;
1010 #endif
1011 	return 0;
1012 }
1013 
1014 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1015 				 struct iov_iter *from, size_t length)
1016 {
1017 	skb_zcopy_downgrade_managed(skb);
1018 	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1019 }
1020 
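/*
 * Zero-copy page gathering: map the bvec pages from the iterator straight
 * into skb frags without copying, accounting the added truesize against the
 * socket. Falls back to __zerocopy_sg_from_iter() if the skb already carries
 * frags that aren't managed by us.
 */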
1021 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1022 			   struct iov_iter *from, size_t length)
1023 {
1024 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1025 	int frag = shinfo->nr_frags;
1026 	int ret = 0;
1027 	struct bvec_iter bi;
1028 	ssize_t copied = 0;
1029 	unsigned long truesize = 0;
1030 
1031 	if (!frag)
1032 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1033 	else if (unlikely(!skb_zcopy_managed(skb)))
1034 		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1035 
1036 	bi.bi_size = min(from->count, length);
1037 	bi.bi_bvec_done = from->iov_offset;
1038 	bi.bi_idx = 0;
1039 
1040 	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1041 		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1042 
1043 		copied += v.bv_len;
1044 		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1045 		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1046 					   v.bv_offset, v.bv_len);
1047 		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1048 	}
1049 	if (bi.bi_size)
1050 		ret = -EMSGSIZE;
1051 
1052 	shinfo->nr_frags = frag;
1053 	from->bvec += bi.bi_idx;
1054 	from->nr_segs -= bi.bi_idx;
1055 	from->count -= copied;
1056 	from->iov_offset = bi.bi_bvec_done;
1057 
1058 	skb->data_len += copied;
1059 	skb->len += copied;
1060 	skb->truesize += truesize;
1061 
1062 	if (sk && sk->sk_type == SOCK_STREAM) {
1063 		sk_wmem_queued_add(sk, truesize);
1064 		if (!skb_zcopy_pure(skb))
1065 			sk_mem_charge(sk, truesize);
1066 	} else {
1067 		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1068 	}
1069 	return ret;
1070 }
1071 
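/*
 * Issue path for IORING_OP_SEND_ZC. The data pages are referenced rather than
 * copied (MSG_ZEROCOPY with msg_ubuf pointing at the notification's uarg), so
 * the result CQE is posted with IORING_CQE_F_MORE and a separate
 * IORING_CQE_F_NOTIF CQE follows once the buffer is no longer in use.
 */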
1072 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1073 {
1074 	struct sockaddr_storage __address;
1075 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1076 	struct msghdr msg;
1077 	struct iovec iov;
1078 	struct socket *sock;
1079 	unsigned msg_flags;
1080 	int ret, min_ret = 0;
1081 
1082 	sock = sock_from_file(req->file);
1083 	if (unlikely(!sock))
1084 		return -ENOTSOCK;
1085 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1086 		return -EOPNOTSUPP;
1087 
1088 	msg.msg_name = NULL;
1089 	msg.msg_control = NULL;
1090 	msg.msg_controllen = 0;
1091 	msg.msg_namelen = 0;
1092 
1093 	if (zc->addr) {
1094 		if (req_has_async_data(req)) {
1095 			struct io_async_msghdr *io = req->async_data;
1096 
1097 			msg.msg_name = &io->addr;
1098 		} else {
1099 			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1100 			if (unlikely(ret < 0))
1101 				return ret;
1102 			msg.msg_name = (struct sockaddr *)&__address;
1103 		}
1104 		msg.msg_namelen = zc->addr_len;
1105 	}
1106 
1107 	if (!(req->flags & REQ_F_POLLED) &&
1108 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1109 		return io_setup_async_addr(req, &__address, issue_flags);
1110 
1111 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1112 		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1113 					(u64)(uintptr_t)zc->buf, zc->len);
1114 		if (unlikely(ret))
1115 			return ret;
1116 		msg.sg_from_iter = io_sg_from_iter;
1117 	} else {
1118 		io_notif_set_extended(zc->notif);
1119 		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
1120 					  &msg.msg_iter);
1121 		if (unlikely(ret))
1122 			return ret;
1123 		ret = io_notif_account_mem(zc->notif, zc->len);
1124 		if (unlikely(ret))
1125 			return ret;
1126 		msg.sg_from_iter = io_sg_from_iter_iovec;
1127 	}
1128 
1129 	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1130 	if (issue_flags & IO_URING_F_NONBLOCK)
1131 		msg_flags |= MSG_DONTWAIT;
1132 	if (msg_flags & MSG_WAITALL)
1133 		min_ret = iov_iter_count(&msg.msg_iter);
1134 
1135 	msg.msg_flags = msg_flags;
1136 	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1137 	ret = sock_sendmsg(sock, &msg);
1138 
1139 	if (unlikely(ret < min_ret)) {
1140 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1141 			return io_setup_async_addr(req, &__address, issue_flags);
1142 
1143 		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1144 			zc->len -= ret;
1145 			zc->buf += ret;
1146 			zc->done_io += ret;
1147 			req->flags |= REQ_F_PARTIAL_IO;
1148 			return io_setup_async_addr(req, &__address, issue_flags);
1149 		}
1150 		if (ret == -ERESTARTSYS)
1151 			ret = -EINTR;
1152 		req_set_fail(req);
1153 	}
1154 
1155 	if (ret >= 0)
1156 		ret += zc->done_io;
1157 	else if (zc->done_io)
1158 		ret = zc->done_io;
1159 
1160 	/*
1161 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1162 	 * flushing notif to io_send_zc_cleanup()
1163 	 */
1164 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1165 		io_notif_flush(zc->notif);
1166 		req->flags &= ~REQ_F_NEED_CLEANUP;
1167 	}
1168 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1169 	return IOU_OK;
1170 }
1171 
1172 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1173 {
1174 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1175 	struct io_async_msghdr iomsg, *kmsg;
1176 	struct socket *sock;
1177 	unsigned flags;
1178 	int ret, min_ret = 0;
1179 
1180 	io_notif_set_extended(sr->notif);
1181 
1182 	sock = sock_from_file(req->file);
1183 	if (unlikely(!sock))
1184 		return -ENOTSOCK;
1185 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1186 		return -EOPNOTSUPP;
1187 
1188 	if (req_has_async_data(req)) {
1189 		kmsg = req->async_data;
1190 	} else {
1191 		ret = io_sendmsg_copy_hdr(req, &iomsg);
1192 		if (ret)
1193 			return ret;
1194 		kmsg = &iomsg;
1195 	}
1196 
1197 	if (!(req->flags & REQ_F_POLLED) &&
1198 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1199 		return io_setup_async_msg(req, kmsg, issue_flags);
1200 
1201 	flags = sr->msg_flags | MSG_ZEROCOPY;
1202 	if (issue_flags & IO_URING_F_NONBLOCK)
1203 		flags |= MSG_DONTWAIT;
1204 	if (flags & MSG_WAITALL)
1205 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1206 
1207 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1208 	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1209 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1210 
1211 	if (unlikely(ret < min_ret)) {
1212 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1213 			return io_setup_async_msg(req, kmsg, issue_flags);
1214 
1215 		if (ret > 0 && io_net_retry(sock, flags)) {
1216 			sr->done_io += ret;
1217 			req->flags |= REQ_F_PARTIAL_IO;
1218 			return io_setup_async_msg(req, kmsg, issue_flags);
1219 		}
1220 		if (ret == -ERESTARTSYS)
1221 			ret = -EINTR;
1222 		req_set_fail(req);
1223 	}
1224 	/* fast path, check for non-NULL to avoid function call */
1225 	if (kmsg->free_iov) {
1226 		kfree(kmsg->free_iov);
1227 		kmsg->free_iov = NULL;
1228 	}
1229 
1230 	io_netmsg_recycle(req, issue_flags);
1231 	if (ret >= 0)
1232 		ret += sr->done_io;
1233 	else if (sr->done_io)
1234 		ret = sr->done_io;
1235 
1236 	/*
1237 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1238 	 * flushing notif to io_send_zc_cleanup()
1239 	 */
1240 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1241 		io_notif_flush(sr->notif);
1242 		req->flags &= ~REQ_F_NEED_CLEANUP;
1243 	}
1244 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1245 	return IOU_OK;
1246 }
1247 
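/*
 * Failure completion hook for the send/recv opcodes: report partial progress
 * if there was any, and for zerocopy sends keep IORING_CQE_F_MORE set while a
 * notification CQE is still outstanding.
 */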
1248 void io_sendrecv_fail(struct io_kiocb *req)
1249 {
1250 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1251 
1252 	if (req->flags & REQ_F_PARTIAL_IO)
1253 		req->cqe.res = sr->done_io;
1254 
1255 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1256 	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1257 		req->cqe.flags |= IORING_CQE_F_MORE;
1258 }
1259 
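/*
 * IORING_OP_ACCEPT: sqe->addr and sqe->addr2 carry the optional peer address
 * and length pointers, sqe->accept_flags the accept4(2) flags, and
 * sqe->file_index an optional fixed-file slot. IORING_ACCEPT_MULTISHOT (in
 * sqe->ioprio) keeps accepting connections until an error or cancellation.
 */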
1260 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1261 {
1262 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1263 	unsigned flags;
1264 
1265 	if (sqe->len || sqe->buf_index)
1266 		return -EINVAL;
1267 
1268 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1269 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1270 	accept->flags = READ_ONCE(sqe->accept_flags);
1271 	accept->nofile = rlimit(RLIMIT_NOFILE);
1272 	flags = READ_ONCE(sqe->ioprio);
1273 	if (flags & ~IORING_ACCEPT_MULTISHOT)
1274 		return -EINVAL;
1275 
1276 	accept->file_slot = READ_ONCE(sqe->file_index);
1277 	if (accept->file_slot) {
1278 		if (accept->flags & SOCK_CLOEXEC)
1279 			return -EINVAL;
1280 		if (flags & IORING_ACCEPT_MULTISHOT &&
1281 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1282 			return -EINVAL;
1283 	}
1284 	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1285 		return -EINVAL;
1286 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1287 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1288 	if (flags & IORING_ACCEPT_MULTISHOT)
1289 		req->flags |= REQ_F_APOLL_MULTISHOT;
1290 	return 0;
1291 }
1292 
1293 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1294 {
1295 	struct io_ring_ctx *ctx = req->ctx;
1296 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1297 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1298 	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1299 	bool fixed = !!accept->file_slot;
1300 	struct file *file;
1301 	int ret, fd;
1302 
1303 	if (!io_check_multishot(req, issue_flags))
1304 		return -EAGAIN;
1305 retry:
1306 	if (!fixed) {
1307 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1308 		if (unlikely(fd < 0))
1309 			return fd;
1310 	}
1311 	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1312 			 accept->flags);
1313 	if (IS_ERR(file)) {
1314 		if (!fixed)
1315 			put_unused_fd(fd);
1316 		ret = PTR_ERR(file);
1317 		if (ret == -EAGAIN && force_nonblock) {
1318 			/*
1319 			 * if it's multishot and polled, we don't need to
1320 			 * return EAGAIN to arm the poll infra since it
1321 			 * has already been done
1322 			 */
1323 			if (issue_flags & IO_URING_F_MULTISHOT)
1324 				ret = IOU_ISSUE_SKIP_COMPLETE;
1325 			return ret;
1326 		}
1327 		if (ret == -ERESTARTSYS)
1328 			ret = -EINTR;
1329 		req_set_fail(req);
1330 	} else if (!fixed) {
1331 		fd_install(fd, file);
1332 		ret = fd;
1333 	} else {
1334 		ret = io_fixed_fd_install(req, issue_flags, file,
1335 						accept->file_slot);
1336 	}
1337 
1338 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1339 		io_req_set_res(req, ret, 0);
1340 		return IOU_OK;
1341 	}
1342 
1343 	if (ret < 0)
1344 		return ret;
1345 	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1346 		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
1347 		goto retry;
1348 
1349 	return -ECANCELED;
1350 }
1351 
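/*
 * IORING_OP_SOCKET: domain, type and protocol are carried in sqe->fd,
 * sqe->off and sqe->len respectively. SOCK_CLOEXEC/SOCK_NONBLOCK may be OR'ed
 * into the type, and sqe->file_index selects an optional fixed-file slot.
 */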
1352 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1353 {
1354 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1355 
1356 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1357 		return -EINVAL;
1358 
1359 	sock->domain = READ_ONCE(sqe->fd);
1360 	sock->type = READ_ONCE(sqe->off);
1361 	sock->protocol = READ_ONCE(sqe->len);
1362 	sock->file_slot = READ_ONCE(sqe->file_index);
1363 	sock->nofile = rlimit(RLIMIT_NOFILE);
1364 
1365 	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1366 	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1367 		return -EINVAL;
1368 	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1369 		return -EINVAL;
1370 	return 0;
1371 }
1372 
1373 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1374 {
1375 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1376 	bool fixed = !!sock->file_slot;
1377 	struct file *file;
1378 	int ret, fd;
1379 
1380 	if (!fixed) {
1381 		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1382 		if (unlikely(fd < 0))
1383 			return fd;
1384 	}
1385 	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1386 	if (IS_ERR(file)) {
1387 		if (!fixed)
1388 			put_unused_fd(fd);
1389 		ret = PTR_ERR(file);
1390 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1391 			return -EAGAIN;
1392 		if (ret == -ERESTARTSYS)
1393 			ret = -EINTR;
1394 		req_set_fail(req);
1395 	} else if (!fixed) {
1396 		fd_install(fd, file);
1397 		ret = fd;
1398 	} else {
1399 		ret = io_fixed_fd_install(req, issue_flags, file,
1400 					    sock->file_slot);
1401 	}
1402 	io_req_set_res(req, ret, 0);
1403 	return IOU_OK;
1404 }
1405 
1406 int io_connect_prep_async(struct io_kiocb *req)
1407 {
1408 	struct io_async_connect *io = req->async_data;
1409 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1410 
1411 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1412 }
1413 
1414 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1415 {
1416 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1417 
1418 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1419 		return -EINVAL;
1420 
1421 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1422 	conn->addr_len =  READ_ONCE(sqe->addr2);
1423 	conn->in_progress = false;
1424 	return 0;
1425 }
1426 
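/*
 * Issue path for IORING_OP_CONNECT. A nonblocking connect that returns
 * -EINPROGRESS is remembered via ->in_progress; later retries then just read
 * the socket error state instead of re-issuing the connect.
 */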
1427 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1428 {
1429 	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1430 	struct io_async_connect __io, *io;
1431 	unsigned file_flags;
1432 	int ret;
1433 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1434 
1435 	if (connect->in_progress) {
1436 		struct socket *socket;
1437 
1438 		ret = -ENOTSOCK;
1439 		socket = sock_from_file(req->file);
1440 		if (socket)
1441 			ret = sock_error(socket->sk);
1442 		goto out;
1443 	}
1444 
1445 	if (req_has_async_data(req)) {
1446 		io = req->async_data;
1447 	} else {
1448 		ret = move_addr_to_kernel(connect->addr,
1449 						connect->addr_len,
1450 						&__io.address);
1451 		if (ret)
1452 			goto out;
1453 		io = &__io;
1454 	}
1455 
1456 	file_flags = force_nonblock ? O_NONBLOCK : 0;
1457 
1458 	ret = __sys_connect_file(req->file, &io->address,
1459 					connect->addr_len, file_flags);
1460 	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
1461 		if (ret == -EINPROGRESS) {
1462 			connect->in_progress = true;
1463 		} else {
1464 			if (req_has_async_data(req))
1465 				return -EAGAIN;
1466 			if (io_alloc_async_data(req)) {
1467 				ret = -ENOMEM;
1468 				goto out;
1469 			}
1470 			memcpy(req->async_data, &__io, sizeof(__io));
1471 		}
1472 		return -EAGAIN;
1473 	}
1474 	if (ret == -ERESTARTSYS)
1475 		ret = -EINTR;
1476 out:
1477 	if (ret < 0)
1478 		req_set_fail(req);
1479 	io_req_set_res(req, ret, 0);
1480 	return IOU_OK;
1481 }
1482 
1483 void io_netmsg_cache_free(struct io_cache_entry *entry)
1484 {
1485 	kfree(container_of(entry, struct io_async_msghdr, cache));
1486 }
1487 #endif
1488