xref: /openbmc/linux/io_uring/net.c (revision 7c2435ef)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10 
11 #include <uapi/linux/io_uring.h>
12 
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19 
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 	struct file			*file;
23 	int				how;
24 };
25 
26 struct io_accept {
27 	struct file			*file;
28 	struct sockaddr __user		*addr;
29 	int __user			*addr_len;
30 	int				flags;
31 	u32				file_slot;
32 	unsigned long			nofile;
33 };
34 
35 struct io_socket {
36 	struct file			*file;
37 	int				domain;
38 	int				type;
39 	int				protocol;
40 	int				flags;
41 	u32				file_slot;
42 	unsigned long			nofile;
43 };
44 
45 struct io_connect {
46 	struct file			*file;
47 	struct sockaddr __user		*addr;
48 	int				addr_len;
49 	bool				in_progress;
50 	bool				seen_econnaborted;
51 };
52 
53 struct io_sr_msg {
54 	struct file			*file;
55 	union {
56 		struct compat_msghdr __user	*umsg_compat;
57 		struct user_msghdr __user	*umsg;
58 		void __user			*buf;
59 	};
60 	unsigned			len;
61 	unsigned			done_io;
62 	unsigned			msg_flags;
63 	u16				flags;
64 	/* initialised and used only by !msg send variants */
65 	u16				addr_len;
66 	u16				buf_group;
67 	void __user			*addr;
68 	/* used only for send zerocopy */
69 	struct io_kiocb 		*notif;
70 };
71 
72 static inline bool io_check_multishot(struct io_kiocb *req,
73 				      unsigned int issue_flags)
74 {
75 	/*
76 	 * When ->task_complete is set we only allow posting CQEs from the
77 	 * original task context. Usual request completions are handled in other
78 	 * generic paths, but multishot poll may decide to post extra CQEs.
79 	 */
80 	return !(issue_flags & IO_URING_F_IOWQ) ||
81 		!(issue_flags & IO_URING_F_MULTISHOT) ||
82 		!req->ctx->task_complete;
83 }
84 
85 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
86 {
87 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
88 
89 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
90 		     sqe->buf_index || sqe->splice_fd_in))
91 		return -EINVAL;
92 
93 	shutdown->how = READ_ONCE(sqe->len);
94 	req->flags |= REQ_F_FORCE_ASYNC;
95 	return 0;
96 }
97 
98 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
99 {
100 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
101 	struct socket *sock;
102 	int ret;
103 
104 	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
105 
106 	sock = sock_from_file(req->file);
107 	if (unlikely(!sock))
108 		return -ENOTSOCK;
109 
110 	ret = __sys_shutdown_sock(sock, shutdown->how);
111 	io_req_set_res(req, ret, 0);
112 	return IOU_OK;
113 }
114 
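/*
 * Decide whether a short transfer should be retried: only when the caller
 * asked for the full amount (MSG_WAITALL) and the socket is stream or
 * seqpacket based, where continuing from where the last attempt stopped is
 * well defined.
 */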
115 static bool io_net_retry(struct socket *sock, int flags)
116 {
117 	if (!(flags & MSG_WAITALL))
118 		return false;
119 	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
120 }
121 
122 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
123 {
124 	struct io_async_msghdr *hdr = req->async_data;
125 
126 	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
127 		return;
128 
129 	/* Let normal cleanup path reap it if we fail adding to the cache */
130 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
131 		req->async_data = NULL;
132 		req->flags &= ~REQ_F_ASYNC_DATA;
133 	}
134 }
135 
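/*
 * Allocate async storage for a net request. With the ring lock held
 * (!IO_URING_F_UNLOCKED) a recycled io_async_msghdr may be taken from
 * ctx->netmsg_cache; otherwise fall back to io_alloc_async_data().
 * Returns NULL if allocation fails.
 */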
136 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
137 						  unsigned int issue_flags)
138 {
139 	struct io_ring_ctx *ctx = req->ctx;
140 	struct io_cache_entry *entry;
141 	struct io_async_msghdr *hdr;
142 
143 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
144 		entry = io_alloc_cache_get(&ctx->netmsg_cache);
145 		if (entry) {
146 			hdr = container_of(entry, struct io_async_msghdr, cache);
147 			hdr->free_iov = NULL;
148 			req->flags |= REQ_F_ASYNC_DATA;
149 			req->async_data = hdr;
150 			return hdr;
151 		}
152 	}
153 
154 	if (!io_alloc_async_data(req)) {
155 		hdr = req->async_data;
156 		hdr->free_iov = NULL;
157 		return hdr;
158 	}
159 	return NULL;
160 }
161 
162 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
163 {
164 	/* ->prep_async is always called from the submission context */
165 	return io_msg_alloc_async(req, 0);
166 }
167 
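/*
 * Preserve the caller's (typically on-stack) msghdr across a retry: copy it
 * into persistent async data, repoint msg_name and any fast_iov based
 * iterator at the copy, and return -EAGAIN so the request is re-issued
 * later from poll or io-wq.
 */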
168 static int io_setup_async_msg(struct io_kiocb *req,
169 			      struct io_async_msghdr *kmsg,
170 			      unsigned int issue_flags)
171 {
172 	struct io_async_msghdr *async_msg;
173 
174 	if (req_has_async_data(req))
175 		return -EAGAIN;
176 	async_msg = io_msg_alloc_async(req, issue_flags);
177 	if (!async_msg) {
178 		kfree(kmsg->free_iov);
179 		return -ENOMEM;
180 	}
181 	req->flags |= REQ_F_NEED_CLEANUP;
182 	memcpy(async_msg, kmsg, sizeof(*kmsg));
183 	if (async_msg->msg.msg_name)
184 		async_msg->msg.msg_name = &async_msg->addr;
185 	/* if we're using fast_iov, set it to the new one */
186 	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
187 		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
188 		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
189 	}
190 
191 	return -EAGAIN;
192 }
193 
194 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
195 			       struct io_async_msghdr *iomsg)
196 {
197 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
198 
199 	iomsg->msg.msg_name = &iomsg->addr;
200 	iomsg->free_iov = iomsg->fast_iov;
201 	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
202 					&iomsg->free_iov);
203 }
204 
205 int io_send_prep_async(struct io_kiocb *req)
206 {
207 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
208 	struct io_async_msghdr *io;
209 	int ret;
210 
211 	if (!zc->addr || req_has_async_data(req))
212 		return 0;
213 	io = io_msg_alloc_async_prep(req);
214 	if (!io)
215 		return -ENOMEM;
216 	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
217 	return ret;
218 }
219 
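/*
 * Same idea as io_setup_async_msg() for the msghdr-less send variants:
 * stash the already imported destination address in async data so a retry
 * does not need to copy it from userspace again.
 */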
220 static int io_setup_async_addr(struct io_kiocb *req,
221 			      struct sockaddr_storage *addr_storage,
222 			      unsigned int issue_flags)
223 {
224 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
225 	struct io_async_msghdr *io;
226 
227 	if (!sr->addr || req_has_async_data(req))
228 		return -EAGAIN;
229 	io = io_msg_alloc_async(req, issue_flags);
230 	if (!io)
231 		return -ENOMEM;
232 	memcpy(&io->addr, addr_storage, sizeof(io->addr));
233 	return -EAGAIN;
234 }
235 
236 int io_sendmsg_prep_async(struct io_kiocb *req)
237 {
238 	int ret;
239 
240 	if (!io_msg_alloc_async_prep(req))
241 		return -ENOMEM;
242 	ret = io_sendmsg_copy_hdr(req, req->async_data);
243 	if (!ret)
244 		req->flags |= REQ_F_NEED_CLEANUP;
245 	return ret;
246 }
247 
248 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
249 {
250 	struct io_async_msghdr *io = req->async_data;
251 
252 	kfree(io->free_iov);
253 }
254 
255 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
256 {
257 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
258 
259 	if (req->opcode == IORING_OP_SEND) {
260 		if (READ_ONCE(sqe->__pad3[0]))
261 			return -EINVAL;
262 		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
263 		sr->addr_len = READ_ONCE(sqe->addr_len);
264 	} else if (sqe->addr2 || sqe->file_index) {
265 		return -EINVAL;
266 	}
267 
268 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
269 	sr->len = READ_ONCE(sqe->len);
270 	sr->flags = READ_ONCE(sqe->ioprio);
271 	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
272 		return -EINVAL;
273 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
274 	if (sr->msg_flags & MSG_DONTWAIT)
275 		req->flags |= REQ_F_NOWAIT;
276 
277 #ifdef CONFIG_COMPAT
278 	if (req->ctx->compat)
279 		sr->msg_flags |= MSG_CMSG_COMPAT;
280 #endif
281 	sr->done_io = 0;
282 	return 0;
283 }
284 
285 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
286 {
287 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
288 	struct io_async_msghdr iomsg, *kmsg;
289 	struct socket *sock;
290 	unsigned flags;
291 	int min_ret = 0;
292 	int ret;
293 
294 	sock = sock_from_file(req->file);
295 	if (unlikely(!sock))
296 		return -ENOTSOCK;
297 
298 	if (req_has_async_data(req)) {
299 		kmsg = req->async_data;
300 	} else {
301 		ret = io_sendmsg_copy_hdr(req, &iomsg);
302 		if (ret)
303 			return ret;
304 		kmsg = &iomsg;
305 	}
306 
307 	if (!(req->flags & REQ_F_POLLED) &&
308 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
309 		return io_setup_async_msg(req, kmsg, issue_flags);
310 
311 	flags = sr->msg_flags;
312 	if (issue_flags & IO_URING_F_NONBLOCK)
313 		flags |= MSG_DONTWAIT;
314 	if (flags & MSG_WAITALL)
315 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
316 
317 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
318 
319 	if (ret < min_ret) {
320 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
321 			return io_setup_async_msg(req, kmsg, issue_flags);
322 		if (ret > 0 && io_net_retry(sock, flags)) {
323 			sr->done_io += ret;
324 			req->flags |= REQ_F_PARTIAL_IO;
325 			return io_setup_async_msg(req, kmsg, issue_flags);
326 		}
327 		if (ret == -ERESTARTSYS)
328 			ret = -EINTR;
329 		req_set_fail(req);
330 	}
331 	/* fast path, check for non-NULL to avoid function call */
332 	if (kmsg->free_iov)
333 		kfree(kmsg->free_iov);
334 	req->flags &= ~REQ_F_NEED_CLEANUP;
335 	io_netmsg_recycle(req, issue_flags);
336 	if (ret >= 0)
337 		ret += sr->done_io;
338 	else if (sr->done_io)
339 		ret = sr->done_io;
340 	io_req_set_res(req, ret, 0);
341 	return IOU_OK;
342 }
343 
344 int io_send(struct io_kiocb *req, unsigned int issue_flags)
345 {
346 	struct sockaddr_storage __address;
347 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
348 	struct msghdr msg;
349 	struct socket *sock;
350 	unsigned flags;
351 	int min_ret = 0;
352 	int ret;
353 
354 	msg.msg_name = NULL;
355 	msg.msg_control = NULL;
356 	msg.msg_controllen = 0;
357 	msg.msg_namelen = 0;
358 	msg.msg_ubuf = NULL;
359 
360 	if (sr->addr) {
361 		if (req_has_async_data(req)) {
362 			struct io_async_msghdr *io = req->async_data;
363 
364 			msg.msg_name = &io->addr;
365 		} else {
366 			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
367 			if (unlikely(ret < 0))
368 				return ret;
369 			msg.msg_name = (struct sockaddr *)&__address;
370 		}
371 		msg.msg_namelen = sr->addr_len;
372 	}
373 
374 	if (!(req->flags & REQ_F_POLLED) &&
375 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
376 		return io_setup_async_addr(req, &__address, issue_flags);
377 
378 	sock = sock_from_file(req->file);
379 	if (unlikely(!sock))
380 		return -ENOTSOCK;
381 
382 	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
383 	if (unlikely(ret))
384 		return ret;
385 
386 	flags = sr->msg_flags;
387 	if (issue_flags & IO_URING_F_NONBLOCK)
388 		flags |= MSG_DONTWAIT;
389 	if (flags & MSG_WAITALL)
390 		min_ret = iov_iter_count(&msg.msg_iter);
391 
392 	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
393 	msg.msg_flags = flags;
394 	ret = sock_sendmsg(sock, &msg);
395 	if (ret < min_ret) {
396 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
397 			return io_setup_async_addr(req, &__address, issue_flags);
398 
399 		if (ret > 0 && io_net_retry(sock, flags)) {
400 			sr->len -= ret;
401 			sr->buf += ret;
402 			sr->done_io += ret;
403 			req->flags |= REQ_F_PARTIAL_IO;
404 			return io_setup_async_addr(req, &__address, issue_flags);
405 		}
406 		if (ret == -ERESTARTSYS)
407 			ret = -EINTR;
408 		req_set_fail(req);
409 	}
410 	if (ret >= 0)
411 		ret += sr->done_io;
412 	else if (sr->done_io)
413 		ret = sr->done_io;
414 	io_req_set_res(req, ret, 0);
415 	return IOU_OK;
416 }
417 
418 static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
419 {
420 	int hdr;
421 
422 	if (iomsg->namelen < 0)
423 		return true;
424 	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
425 			       iomsg->namelen, &hdr))
426 		return true;
427 	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
428 		return true;
429 
430 	return false;
431 }
432 
433 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
434 				 struct io_async_msghdr *iomsg)
435 {
436 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
437 	struct user_msghdr msg;
438 	int ret;
439 
440 	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
441 		return -EFAULT;
442 
443 	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
444 	if (ret)
445 		return ret;
446 
447 	if (req->flags & REQ_F_BUFFER_SELECT) {
448 		if (msg.msg_iovlen == 0) {
449 			sr->len = iomsg->fast_iov[0].iov_len = 0;
450 			iomsg->fast_iov[0].iov_base = NULL;
451 			iomsg->free_iov = NULL;
452 		} else if (msg.msg_iovlen > 1) {
453 			return -EINVAL;
454 		} else {
455 			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
456 				return -EFAULT;
457 			sr->len = iomsg->fast_iov[0].iov_len;
458 			iomsg->free_iov = NULL;
459 		}
460 
461 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
462 			iomsg->namelen = msg.msg_namelen;
463 			iomsg->controllen = msg.msg_controllen;
464 			if (io_recvmsg_multishot_overflow(iomsg))
465 				return -EOVERFLOW;
466 		}
467 	} else {
468 		iomsg->free_iov = iomsg->fast_iov;
469 		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
470 				     &iomsg->free_iov, &iomsg->msg.msg_iter,
471 				     false);
472 		if (ret > 0)
473 			ret = 0;
474 	}
475 
476 	return ret;
477 }
478 
479 #ifdef CONFIG_COMPAT
480 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
481 					struct io_async_msghdr *iomsg)
482 {
483 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
484 	struct compat_msghdr msg;
485 	struct compat_iovec __user *uiov;
486 	int ret;
487 
488 	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
489 		return -EFAULT;
490 
491 	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
492 	if (ret)
493 		return ret;
494 
495 	uiov = compat_ptr(msg.msg_iov);
496 	if (req->flags & REQ_F_BUFFER_SELECT) {
497 		compat_ssize_t clen;
498 
499 		iomsg->free_iov = NULL;
500 		if (msg.msg_iovlen == 0) {
501 			sr->len = 0;
502 		} else if (msg.msg_iovlen > 1) {
503 			return -EINVAL;
504 		} else {
505 			if (!access_ok(uiov, sizeof(*uiov)))
506 				return -EFAULT;
507 			if (__get_user(clen, &uiov->iov_len))
508 				return -EFAULT;
509 			if (clen < 0)
510 				return -EINVAL;
511 			sr->len = clen;
512 		}
513 
514 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
515 			iomsg->namelen = msg.msg_namelen;
516 			iomsg->controllen = msg.msg_controllen;
517 			if (io_recvmsg_multishot_overflow(iomsg))
518 				return -EOVERFLOW;
519 		}
520 	} else {
521 		iomsg->free_iov = iomsg->fast_iov;
522 		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
523 				   UIO_FASTIOV, &iomsg->free_iov,
524 				   &iomsg->msg.msg_iter, true);
525 		if (ret < 0)
526 			return ret;
527 	}
528 
529 	return 0;
530 }
531 #endif
532 
533 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
534 			       struct io_async_msghdr *iomsg)
535 {
536 	iomsg->msg.msg_name = &iomsg->addr;
537 
538 #ifdef CONFIG_COMPAT
539 	if (req->ctx->compat)
540 		return __io_compat_recvmsg_copy_hdr(req, iomsg);
541 #endif
542 
543 	return __io_recvmsg_copy_hdr(req, iomsg);
544 }
545 
546 int io_recvmsg_prep_async(struct io_kiocb *req)
547 {
548 	int ret;
549 
550 	if (!io_msg_alloc_async_prep(req))
551 		return -ENOMEM;
552 	ret = io_recvmsg_copy_hdr(req, req->async_data);
553 	if (!ret)
554 		req->flags |= REQ_F_NEED_CLEANUP;
555 	return ret;
556 }
557 
558 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
559 
560 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
561 {
562 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
563 
564 	if (unlikely(sqe->file_index || sqe->addr2))
565 		return -EINVAL;
566 
567 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
568 	sr->len = READ_ONCE(sqe->len);
569 	sr->flags = READ_ONCE(sqe->ioprio);
570 	if (sr->flags & ~(RECVMSG_FLAGS))
571 		return -EINVAL;
572 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
573 	if (sr->msg_flags & MSG_DONTWAIT)
574 		req->flags |= REQ_F_NOWAIT;
575 	if (sr->msg_flags & MSG_ERRQUEUE)
576 		req->flags |= REQ_F_CLEAR_POLLIN;
577 	if (sr->flags & IORING_RECV_MULTISHOT) {
578 		if (!(req->flags & REQ_F_BUFFER_SELECT))
579 			return -EINVAL;
580 		if (sr->msg_flags & MSG_WAITALL)
581 			return -EINVAL;
582 		if (req->opcode == IORING_OP_RECV && sr->len)
583 			return -EINVAL;
584 		req->flags |= REQ_F_APOLL_MULTISHOT;
585 		/*
586 		 * Store the buffer group for this multishot receive separately,
587 		 * as if we end up doing an io-wq based issue that selects a
588 		 * buffer, it has to be committed immediately and that will
589 		 * clear ->buf_list. This means we lose the link to the buffer
590 		 * list, and the eventual buffer put on completion then cannot
591 		 * restore it.
592 		 */
593 		sr->buf_group = req->buf_index;
594 	}
595 
596 #ifdef CONFIG_COMPAT
597 	if (req->ctx->compat)
598 		sr->msg_flags |= MSG_CMSG_COMPAT;
599 #endif
600 	sr->done_io = 0;
601 	return 0;
602 }
603 
604 static inline void io_recv_prep_retry(struct io_kiocb *req)
605 {
606 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
607 
608 	sr->done_io = 0;
609 	sr->len = 0; /* get from the provided buffer */
610 	req->buf_index = sr->buf_group;
611 }
612 
613 /*
614  * Finishes io_recv and io_recvmsg.
615  *
616  * Returns true if it is actually finished, or false if it should run
617  * again (for multishot).
618  */
619 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
620 				  unsigned int cflags, bool mshot_finished,
621 				  unsigned issue_flags)
622 {
623 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
624 		io_req_set_res(req, *ret, cflags);
625 		*ret = IOU_OK;
626 		return true;
627 	}
628 
629 	if (!mshot_finished) {
630 		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
631 			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
632 			io_recv_prep_retry(req);
633 			return false;
634 		}
635 		/* Otherwise stop multishot but use the current result. */
636 	}
637 
638 	io_req_set_res(req, *ret, cflags);
639 
640 	if (issue_flags & IO_URING_F_MULTISHOT)
641 		*ret = IOU_STOP_MULTISHOT;
642 	else
643 		*ret = IOU_OK;
644 	return true;
645 }
646 
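/*
 * Carve up the selected provided buffer for a multishot recvmsg. The
 * buffer handed back to userspace is laid out as:
 *
 *	struct io_uring_recvmsg_out | name | control | payload
 *
 * The control area sits at the tail of that header region and the payload
 * follows it. The original buffer start is stashed in sr->buf so
 * io_recvmsg_multishot() can copy the header there once the receive is
 * done. Fails with -EFAULT if the buffer cannot even hold the header.
 */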
647 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
648 				     struct io_sr_msg *sr, void __user **buf,
649 				     size_t *len)
650 {
651 	unsigned long ubuf = (unsigned long) *buf;
652 	unsigned long hdr;
653 
654 	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
655 		kmsg->controllen;
656 	if (*len < hdr)
657 		return -EFAULT;
658 
659 	if (kmsg->controllen) {
660 		unsigned long control = ubuf + hdr - kmsg->controllen;
661 
662 		kmsg->msg.msg_control_user = (void __user *) control;
663 		kmsg->msg.msg_controllen = kmsg->controllen;
664 	}
665 
666 	sr->buf = *buf; /* stash for later copy */
667 	*buf = (void __user *) (ubuf + hdr);
668 	kmsg->payloadlen = *len = *len - hdr;
669 	return 0;
670 }
671 
672 struct io_recvmsg_multishot_hdr {
673 	struct io_uring_recvmsg_out msg;
674 	struct sockaddr_storage addr;
675 };
676 
677 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
678 				struct io_async_msghdr *kmsg,
679 				unsigned int flags, bool *finished)
680 {
681 	int err;
682 	int copy_len;
683 	struct io_recvmsg_multishot_hdr hdr;
684 
685 	if (kmsg->namelen)
686 		kmsg->msg.msg_name = &hdr.addr;
687 	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
688 	kmsg->msg.msg_namelen = 0;
689 
690 	if (sock->file->f_flags & O_NONBLOCK)
691 		flags |= MSG_DONTWAIT;
692 
693 	err = sock_recvmsg(sock, &kmsg->msg, flags);
694 	*finished = err <= 0;
695 	if (err < 0)
696 		return err;
697 
698 	hdr.msg = (struct io_uring_recvmsg_out) {
699 		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
700 		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
701 	};
702 
703 	hdr.msg.payloadlen = err;
704 	if (err > kmsg->payloadlen)
705 		err = kmsg->payloadlen;
706 
707 	copy_len = sizeof(struct io_uring_recvmsg_out);
708 	if (kmsg->msg.msg_namelen > kmsg->namelen)
709 		copy_len += kmsg->namelen;
710 	else
711 		copy_len += kmsg->msg.msg_namelen;
712 
713 	/*
714 	 *      "fromlen shall refer to the value before truncation.."
715 	 *                      1003.1g
716 	 */
717 	hdr.msg.namelen = kmsg->msg.msg_namelen;
718 
719 	/* ensure that there is no gap between hdr and sockaddr_storage */
720 	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
721 		     sizeof(struct io_uring_recvmsg_out));
722 	if (copy_to_user(io->buf, &hdr, copy_len)) {
723 		*finished = true;
724 		return -EFAULT;
725 	}
726 
727 	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
728 			kmsg->controllen + err;
729 }
730 
731 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
732 {
733 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
734 	struct io_async_msghdr iomsg, *kmsg;
735 	struct socket *sock;
736 	unsigned int cflags;
737 	unsigned flags;
738 	int ret, min_ret = 0;
739 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
740 	bool mshot_finished = true;
741 
742 	sock = sock_from_file(req->file);
743 	if (unlikely(!sock))
744 		return -ENOTSOCK;
745 
746 	if (req_has_async_data(req)) {
747 		kmsg = req->async_data;
748 	} else {
749 		ret = io_recvmsg_copy_hdr(req, &iomsg);
750 		if (ret)
751 			return ret;
752 		kmsg = &iomsg;
753 	}
754 
755 	if (!(req->flags & REQ_F_POLLED) &&
756 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
757 		return io_setup_async_msg(req, kmsg, issue_flags);
758 
759 	if (!io_check_multishot(req, issue_flags))
760 		return io_setup_async_msg(req, kmsg, issue_flags);
761 
762 retry_multishot:
763 	if (io_do_buffer_select(req)) {
764 		void __user *buf;
765 		size_t len = sr->len;
766 
767 		buf = io_buffer_select(req, &len, issue_flags);
768 		if (!buf)
769 			return -ENOBUFS;
770 
771 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
772 			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
773 			if (ret) {
774 				io_kbuf_recycle(req, issue_flags);
775 				return ret;
776 			}
777 		}
778 
779 		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
780 	}
781 
782 	flags = sr->msg_flags;
783 	if (force_nonblock)
784 		flags |= MSG_DONTWAIT;
785 	if (flags & MSG_WAITALL)
786 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
787 
788 	kmsg->msg.msg_get_inq = 1;
789 	if (req->flags & REQ_F_APOLL_MULTISHOT)
790 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
791 					   &mshot_finished);
792 	else
793 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
794 					 kmsg->uaddr, flags);
795 
796 	if (ret < min_ret) {
797 		if (ret == -EAGAIN && force_nonblock) {
798 			ret = io_setup_async_msg(req, kmsg, issue_flags);
799 			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
800 				io_kbuf_recycle(req, issue_flags);
801 				return IOU_ISSUE_SKIP_COMPLETE;
802 			}
803 			return ret;
804 		}
805 		if (ret > 0 && io_net_retry(sock, flags)) {
806 			sr->done_io += ret;
807 			req->flags |= REQ_F_PARTIAL_IO;
808 			return io_setup_async_msg(req, kmsg, issue_flags);
809 		}
810 		if (ret == -ERESTARTSYS)
811 			ret = -EINTR;
812 		req_set_fail(req);
813 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
814 		req_set_fail(req);
815 	}
816 
817 	if (ret > 0)
818 		ret += sr->done_io;
819 	else if (sr->done_io)
820 		ret = sr->done_io;
821 	else
822 		io_kbuf_recycle(req, issue_flags);
823 
824 	cflags = io_put_kbuf(req, issue_flags);
825 	if (kmsg->msg.msg_inq)
826 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
827 
828 	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
829 		goto retry_multishot;
830 
831 	if (mshot_finished) {
832 		/* fast path, check for non-NULL to avoid function call */
833 		if (kmsg->free_iov)
834 			kfree(kmsg->free_iov);
835 		io_netmsg_recycle(req, issue_flags);
836 		req->flags &= ~REQ_F_NEED_CLEANUP;
837 	}
838 
839 	return ret;
840 }
841 
842 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
843 {
844 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
845 	struct msghdr msg;
846 	struct socket *sock;
847 	unsigned int cflags;
848 	unsigned flags;
849 	int ret, min_ret = 0;
850 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
851 	size_t len = sr->len;
852 
853 	if (!(req->flags & REQ_F_POLLED) &&
854 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
855 		return -EAGAIN;
856 
857 	if (!io_check_multishot(req, issue_flags))
858 		return -EAGAIN;
859 
860 	sock = sock_from_file(req->file);
861 	if (unlikely(!sock))
862 		return -ENOTSOCK;
863 
864 retry_multishot:
865 	if (io_do_buffer_select(req)) {
866 		void __user *buf;
867 
868 		buf = io_buffer_select(req, &len, issue_flags);
869 		if (!buf)
870 			return -ENOBUFS;
871 		sr->buf = buf;
872 	}
873 
874 	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
875 	if (unlikely(ret))
876 		goto out_free;
877 
878 	msg.msg_name = NULL;
879 	msg.msg_namelen = 0;
880 	msg.msg_control = NULL;
881 	msg.msg_get_inq = 1;
882 	msg.msg_flags = 0;
883 	msg.msg_controllen = 0;
884 	msg.msg_iocb = NULL;
885 	msg.msg_ubuf = NULL;
886 
887 	flags = sr->msg_flags;
888 	if (force_nonblock)
889 		flags |= MSG_DONTWAIT;
890 	if (flags & MSG_WAITALL)
891 		min_ret = iov_iter_count(&msg.msg_iter);
892 
893 	ret = sock_recvmsg(sock, &msg, flags);
894 	if (ret < min_ret) {
895 		if (ret == -EAGAIN && force_nonblock) {
896 			if (issue_flags & IO_URING_F_MULTISHOT) {
897 				io_kbuf_recycle(req, issue_flags);
898 				return IOU_ISSUE_SKIP_COMPLETE;
899 			}
900 
901 			return -EAGAIN;
902 		}
903 		if (ret > 0 && io_net_retry(sock, flags)) {
904 			sr->len -= ret;
905 			sr->buf += ret;
906 			sr->done_io += ret;
907 			req->flags |= REQ_F_PARTIAL_IO;
908 			return -EAGAIN;
909 		}
910 		if (ret == -ERESTARTSYS)
911 			ret = -EINTR;
912 		req_set_fail(req);
913 	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
914 out_free:
915 		req_set_fail(req);
916 	}
917 
918 	if (ret > 0)
919 		ret += sr->done_io;
920 	else if (sr->done_io)
921 		ret = sr->done_io;
922 	else
923 		io_kbuf_recycle(req, issue_flags);
924 
925 	cflags = io_put_kbuf(req, issue_flags);
926 	if (msg.msg_inq)
927 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
928 
929 	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
930 		goto retry_multishot;
931 
932 	return ret;
933 }
934 
935 void io_send_zc_cleanup(struct io_kiocb *req)
936 {
937 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
938 	struct io_async_msghdr *io;
939 
940 	if (req_has_async_data(req)) {
941 		io = req->async_data;
942 		/* might be ->fast_iov if *msg_copy_hdr failed */
943 		if (io->free_iov != io->fast_iov)
944 			kfree(io->free_iov);
945 	}
946 	if (zc->notif) {
947 		io_notif_flush(zc->notif);
948 		zc->notif = NULL;
949 	}
950 }
951 
952 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
953 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
954 
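/*
 * Prepare a zero-copy send. A notification request is allocated up front;
 * it later posts a second CQE (same user_data, IORING_CQE_F_NOTIF set)
 * once the kernel has dropped all references to the user pages, so the
 * buffer must not be reused before that CQE is seen. Illustrative
 * userspace sketch, assuming liburing's io_uring_prep_send_zc() helper:
 *
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *	// CQE 1: res = bytes sent, flags include IORING_CQE_F_MORE
 *	// CQE 2: flags include IORING_CQE_F_NOTIF, buffer may be reused
 */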
955 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
956 {
957 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
958 	struct io_ring_ctx *ctx = req->ctx;
959 	struct io_kiocb *notif;
960 
961 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
962 		return -EINVAL;
963 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
964 	if (req->flags & REQ_F_CQE_SKIP)
965 		return -EINVAL;
966 
967 	notif = zc->notif = io_alloc_notif(ctx);
968 	if (!notif)
969 		return -ENOMEM;
970 	notif->cqe.user_data = req->cqe.user_data;
971 	notif->cqe.res = 0;
972 	notif->cqe.flags = IORING_CQE_F_NOTIF;
973 	req->flags |= REQ_F_NEED_CLEANUP;
974 
975 	zc->flags = READ_ONCE(sqe->ioprio);
976 	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
977 		if (zc->flags & ~IO_ZC_FLAGS_VALID)
978 			return -EINVAL;
979 		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
980 			io_notif_set_extended(notif);
981 			io_notif_to_data(notif)->zc_report = true;
982 		}
983 	}
984 
985 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
986 		unsigned idx = READ_ONCE(sqe->buf_index);
987 
988 		if (unlikely(idx >= ctx->nr_user_bufs))
989 			return -EFAULT;
990 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
991 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
992 		io_req_set_rsrc_node(notif, ctx, 0);
993 	}
994 
995 	if (req->opcode == IORING_OP_SEND_ZC) {
996 		if (READ_ONCE(sqe->__pad3[0]))
997 			return -EINVAL;
998 		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
999 		zc->addr_len = READ_ONCE(sqe->addr_len);
1000 	} else {
1001 		if (unlikely(sqe->addr2 || sqe->file_index))
1002 			return -EINVAL;
1003 		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1004 			return -EINVAL;
1005 	}
1006 
1007 	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1008 	zc->len = READ_ONCE(sqe->len);
1009 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1010 	if (zc->msg_flags & MSG_DONTWAIT)
1011 		req->flags |= REQ_F_NOWAIT;
1012 
1013 	zc->done_io = 0;
1014 
1015 #ifdef CONFIG_COMPAT
1016 	if (req->ctx->compat)
1017 		zc->msg_flags |= MSG_CMSG_COMPAT;
1018 #endif
1019 	return 0;
1020 }
1021 
1022 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1023 				 struct iov_iter *from, size_t length)
1024 {
1025 	skb_zcopy_downgrade_managed(skb);
1026 	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1027 }
1028 
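/*
 * Splice pages from a bvec iterator (registered buffer) directly into the
 * skb frag array without copying and without taking page references
 * (SKBFL_MANAGED_FRAG_REFS); the notification keeps the pages alive. If
 * the skb already carries unmanaged frags, fall back to the generic
 * __zerocopy_sg_from_iter() path. The added truesize is charged to the
 * socket's write queue for stream sockets, or to sk_wmem_alloc otherwise.
 */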
1029 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1030 			   struct iov_iter *from, size_t length)
1031 {
1032 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1033 	int frag = shinfo->nr_frags;
1034 	int ret = 0;
1035 	struct bvec_iter bi;
1036 	ssize_t copied = 0;
1037 	unsigned long truesize = 0;
1038 
1039 	if (!frag)
1040 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1041 	else if (unlikely(!skb_zcopy_managed(skb)))
1042 		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1043 
1044 	bi.bi_size = min(from->count, length);
1045 	bi.bi_bvec_done = from->iov_offset;
1046 	bi.bi_idx = 0;
1047 
1048 	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1049 		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1050 
1051 		copied += v.bv_len;
1052 		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1053 		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1054 					   v.bv_offset, v.bv_len);
1055 		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1056 	}
1057 	if (bi.bi_size)
1058 		ret = -EMSGSIZE;
1059 
1060 	shinfo->nr_frags = frag;
1061 	from->bvec += bi.bi_idx;
1062 	from->nr_segs -= bi.bi_idx;
1063 	from->count -= copied;
1064 	from->iov_offset = bi.bi_bvec_done;
1065 
1066 	skb->data_len += copied;
1067 	skb->len += copied;
1068 	skb->truesize += truesize;
1069 
1070 	if (sk && sk->sk_type == SOCK_STREAM) {
1071 		sk_wmem_queued_add(sk, truesize);
1072 		if (!skb_zcopy_pure(skb))
1073 			sk_mem_charge(sk, truesize);
1074 	} else {
1075 		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1076 	}
1077 	return ret;
1078 }
1079 
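/*
 * Issue path for IORING_OP_SEND_ZC. Registered buffers are imported with
 * io_import_fixed() and mapped into the skb via io_sg_from_iter(); plain
 * user buffers go through import_ubuf() and are accounted against the
 * notif. The completion CQE is posted with IORING_CQE_F_MORE set; the
 * matching notif CQE follows once the data is no longer referenced.
 */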
1080 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1081 {
1082 	struct sockaddr_storage __address;
1083 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1084 	struct msghdr msg;
1085 	struct socket *sock;
1086 	unsigned msg_flags;
1087 	int ret, min_ret = 0;
1088 
1089 	sock = sock_from_file(req->file);
1090 	if (unlikely(!sock))
1091 		return -ENOTSOCK;
1092 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1093 		return -EOPNOTSUPP;
1094 
1095 	msg.msg_name = NULL;
1096 	msg.msg_control = NULL;
1097 	msg.msg_controllen = 0;
1098 	msg.msg_namelen = 0;
1099 
1100 	if (zc->addr) {
1101 		if (req_has_async_data(req)) {
1102 			struct io_async_msghdr *io = req->async_data;
1103 
1104 			msg.msg_name = &io->addr;
1105 		} else {
1106 			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1107 			if (unlikely(ret < 0))
1108 				return ret;
1109 			msg.msg_name = (struct sockaddr *)&__address;
1110 		}
1111 		msg.msg_namelen = zc->addr_len;
1112 	}
1113 
1114 	if (!(req->flags & REQ_F_POLLED) &&
1115 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1116 		return io_setup_async_addr(req, &__address, issue_flags);
1117 
1118 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1119 		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1120 					(u64)(uintptr_t)zc->buf, zc->len);
1121 		if (unlikely(ret))
1122 			return ret;
1123 		msg.sg_from_iter = io_sg_from_iter;
1124 	} else {
1125 		io_notif_set_extended(zc->notif);
1126 		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1127 		if (unlikely(ret))
1128 			return ret;
1129 		ret = io_notif_account_mem(zc->notif, zc->len);
1130 		if (unlikely(ret))
1131 			return ret;
1132 		msg.sg_from_iter = io_sg_from_iter_iovec;
1133 	}
1134 
1135 	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1136 	if (issue_flags & IO_URING_F_NONBLOCK)
1137 		msg_flags |= MSG_DONTWAIT;
1138 	if (msg_flags & MSG_WAITALL)
1139 		min_ret = iov_iter_count(&msg.msg_iter);
1140 	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1141 
1142 	msg.msg_flags = msg_flags;
1143 	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1144 	ret = sock_sendmsg(sock, &msg);
1145 
1146 	if (unlikely(ret < min_ret)) {
1147 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1148 			return io_setup_async_addr(req, &__address, issue_flags);
1149 
1150 		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1151 			zc->len -= ret;
1152 			zc->buf += ret;
1153 			zc->done_io += ret;
1154 			req->flags |= REQ_F_PARTIAL_IO;
1155 			return io_setup_async_addr(req, &__address, issue_flags);
1156 		}
1157 		if (ret == -ERESTARTSYS)
1158 			ret = -EINTR;
1159 		req_set_fail(req);
1160 	}
1161 
1162 	if (ret >= 0)
1163 		ret += zc->done_io;
1164 	else if (zc->done_io)
1165 		ret = zc->done_io;
1166 
1167 	/*
1168 	 * If we're in io-wq we can't rely on tw ordering guarantees, so defer
1169 	 * flushing the notif to io_send_zc_cleanup().
1170 	 */
1171 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1172 		io_notif_flush(zc->notif);
1173 		req->flags &= ~REQ_F_NEED_CLEANUP;
1174 	}
1175 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1176 	return IOU_OK;
1177 }
1178 
1179 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1180 {
1181 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1182 	struct io_async_msghdr iomsg, *kmsg;
1183 	struct socket *sock;
1184 	unsigned flags;
1185 	int ret, min_ret = 0;
1186 
1187 	io_notif_set_extended(sr->notif);
1188 
1189 	sock = sock_from_file(req->file);
1190 	if (unlikely(!sock))
1191 		return -ENOTSOCK;
1192 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1193 		return -EOPNOTSUPP;
1194 
1195 	if (req_has_async_data(req)) {
1196 		kmsg = req->async_data;
1197 	} else {
1198 		ret = io_sendmsg_copy_hdr(req, &iomsg);
1199 		if (ret)
1200 			return ret;
1201 		kmsg = &iomsg;
1202 	}
1203 
1204 	if (!(req->flags & REQ_F_POLLED) &&
1205 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1206 		return io_setup_async_msg(req, kmsg, issue_flags);
1207 
1208 	flags = sr->msg_flags | MSG_ZEROCOPY;
1209 	if (issue_flags & IO_URING_F_NONBLOCK)
1210 		flags |= MSG_DONTWAIT;
1211 	if (flags & MSG_WAITALL)
1212 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1213 
1214 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1215 	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1216 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1217 
1218 	if (unlikely(ret < min_ret)) {
1219 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1220 			return io_setup_async_msg(req, kmsg, issue_flags);
1221 
1222 		if (ret > 0 && io_net_retry(sock, flags)) {
1223 			sr->done_io += ret;
1224 			req->flags |= REQ_F_PARTIAL_IO;
1225 			return io_setup_async_msg(req, kmsg, issue_flags);
1226 		}
1227 		if (ret == -ERESTARTSYS)
1228 			ret = -EINTR;
1229 		req_set_fail(req);
1230 	}
1231 	/* fast path, check for non-NULL to avoid function call */
1232 	if (kmsg->free_iov) {
1233 		kfree(kmsg->free_iov);
1234 		kmsg->free_iov = NULL;
1235 	}
1236 
1237 	io_netmsg_recycle(req, issue_flags);
1238 	if (ret >= 0)
1239 		ret += sr->done_io;
1240 	else if (sr->done_io)
1241 		ret = sr->done_io;
1242 
1243 	/*
1244 	 * If we're in io-wq we can't rely on tw ordering guarantees, so defer
1245 	 * flushing the notif to io_send_zc_cleanup().
1246 	 */
1247 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1248 		io_notif_flush(sr->notif);
1249 		req->flags &= ~REQ_F_NEED_CLEANUP;
1250 	}
1251 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1252 	return IOU_OK;
1253 }
1254 
1255 void io_sendrecv_fail(struct io_kiocb *req)
1256 {
1257 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1258 
1259 	if (req->flags & REQ_F_PARTIAL_IO)
1260 		req->cqe.res = sr->done_io;
1261 
1262 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1263 	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1264 		req->cqe.flags |= IORING_CQE_F_MORE;
1265 }
1266 
1267 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1268 {
1269 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1270 	unsigned flags;
1271 
1272 	if (sqe->len || sqe->buf_index)
1273 		return -EINVAL;
1274 
1275 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1276 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1277 	accept->flags = READ_ONCE(sqe->accept_flags);
1278 	accept->nofile = rlimit(RLIMIT_NOFILE);
1279 	flags = READ_ONCE(sqe->ioprio);
1280 	if (flags & ~IORING_ACCEPT_MULTISHOT)
1281 		return -EINVAL;
1282 
1283 	accept->file_slot = READ_ONCE(sqe->file_index);
1284 	if (accept->file_slot) {
1285 		if (accept->flags & SOCK_CLOEXEC)
1286 			return -EINVAL;
1287 		if (flags & IORING_ACCEPT_MULTISHOT &&
1288 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1289 			return -EINVAL;
1290 	}
1291 	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1292 		return -EINVAL;
1293 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1294 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1295 	if (flags & IORING_ACCEPT_MULTISHOT)
1296 		req->flags |= REQ_F_APOLL_MULTISHOT;
1297 	return 0;
1298 }
1299 
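/*
 * Accept one connection, or a stream of them with IORING_ACCEPT_MULTISHOT.
 * In multishot mode every accepted socket is posted as an extra CQE with
 * IORING_CQE_F_MORE and the request loops to accept again, until it would
 * block or the CQE cannot be posted.
 */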
1300 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1301 {
1302 	struct io_ring_ctx *ctx = req->ctx;
1303 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1304 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1305 	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1306 	bool fixed = !!accept->file_slot;
1307 	struct file *file;
1308 	int ret, fd;
1309 
1310 	if (!io_check_multishot(req, issue_flags))
1311 		return -EAGAIN;
1312 retry:
1313 	if (!fixed) {
1314 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1315 		if (unlikely(fd < 0))
1316 			return fd;
1317 	}
1318 	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1319 			 accept->flags);
1320 	if (IS_ERR(file)) {
1321 		if (!fixed)
1322 			put_unused_fd(fd);
1323 		ret = PTR_ERR(file);
1324 		if (ret == -EAGAIN && force_nonblock) {
1325 			/*
1326 			 * if it's multishot and polled, we don't need to
1327 			 * return EAGAIN to arm the poll infra since it
1328 			 * has already been done
1329 			 */
1330 			if (issue_flags & IO_URING_F_MULTISHOT)
1331 				ret = IOU_ISSUE_SKIP_COMPLETE;
1332 			return ret;
1333 		}
1334 		if (ret == -ERESTARTSYS)
1335 			ret = -EINTR;
1336 		req_set_fail(req);
1337 	} else if (!fixed) {
1338 		fd_install(fd, file);
1339 		ret = fd;
1340 	} else {
1341 		ret = io_fixed_fd_install(req, issue_flags, file,
1342 						accept->file_slot);
1343 	}
1344 
1345 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1346 		io_req_set_res(req, ret, 0);
1347 		return IOU_OK;
1348 	}
1349 
1350 	if (ret < 0)
1351 		return ret;
1352 	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1353 		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
1354 		goto retry;
1355 
1356 	return -ECANCELED;
1357 }
1358 
1359 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1360 {
1361 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1362 
1363 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1364 		return -EINVAL;
1365 
1366 	sock->domain = READ_ONCE(sqe->fd);
1367 	sock->type = READ_ONCE(sqe->off);
1368 	sock->protocol = READ_ONCE(sqe->len);
1369 	sock->file_slot = READ_ONCE(sqe->file_index);
1370 	sock->nofile = rlimit(RLIMIT_NOFILE);
1371 
1372 	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1373 	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1374 		return -EINVAL;
1375 	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1376 		return -EINVAL;
1377 	return 0;
1378 }
1379 
1380 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1381 {
1382 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1383 	bool fixed = !!sock->file_slot;
1384 	struct file *file;
1385 	int ret, fd;
1386 
1387 	if (!fixed) {
1388 		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1389 		if (unlikely(fd < 0))
1390 			return fd;
1391 	}
1392 	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1393 	if (IS_ERR(file)) {
1394 		if (!fixed)
1395 			put_unused_fd(fd);
1396 		ret = PTR_ERR(file);
1397 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1398 			return -EAGAIN;
1399 		if (ret == -ERESTARTSYS)
1400 			ret = -EINTR;
1401 		req_set_fail(req);
1402 	} else if (!fixed) {
1403 		fd_install(fd, file);
1404 		ret = fd;
1405 	} else {
1406 		ret = io_fixed_fd_install(req, issue_flags, file,
1407 					    sock->file_slot);
1408 	}
1409 	io_req_set_res(req, ret, 0);
1410 	return IOU_OK;
1411 }
1412 
1413 int io_connect_prep_async(struct io_kiocb *req)
1414 {
1415 	struct io_async_connect *io = req->async_data;
1416 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1417 
1418 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1419 }
1420 
1421 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1422 {
1423 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1424 
1425 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1426 		return -EINVAL;
1427 
1428 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1429 	conn->addr_len =  READ_ONCE(sqe->addr2);
1430 	conn->in_progress = conn->seen_econnaborted = false;
1431 	return 0;
1432 }
1433 
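/*
 * Nonblocking connect. -EINPROGRESS marks the request in_progress and
 * arms poll via -EAGAIN; on re-issue the final status is read back with
 * sock_error(). A single -ECONNABORTED is retried transparently; a second
 * one is reported to userspace.
 */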
1434 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1435 {
1436 	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1437 	struct io_async_connect __io, *io;
1438 	unsigned file_flags;
1439 	int ret;
1440 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1441 
1442 	if (connect->in_progress) {
1443 		struct socket *socket;
1444 
1445 		ret = -ENOTSOCK;
1446 		socket = sock_from_file(req->file);
1447 		if (socket)
1448 			ret = sock_error(socket->sk);
1449 		goto out;
1450 	}
1451 
1452 	if (req_has_async_data(req)) {
1453 		io = req->async_data;
1454 	} else {
1455 		ret = move_addr_to_kernel(connect->addr,
1456 						connect->addr_len,
1457 						&__io.address);
1458 		if (ret)
1459 			goto out;
1460 		io = &__io;
1461 	}
1462 
1463 	file_flags = force_nonblock ? O_NONBLOCK : 0;
1464 
1465 	ret = __sys_connect_file(req->file, &io->address,
1466 					connect->addr_len, file_flags);
1467 	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1468 	    && force_nonblock) {
1469 		if (ret == -EINPROGRESS) {
1470 			connect->in_progress = true;
1471 			return -EAGAIN;
1472 		}
1473 		if (ret == -ECONNABORTED) {
1474 			if (connect->seen_econnaborted)
1475 				goto out;
1476 			connect->seen_econnaborted = true;
1477 		}
1478 		if (req_has_async_data(req))
1479 			return -EAGAIN;
1480 		if (io_alloc_async_data(req)) {
1481 			ret = -ENOMEM;
1482 			goto out;
1483 		}
1484 		memcpy(req->async_data, &__io, sizeof(__io));
1485 		return -EAGAIN;
1486 	}
1487 	if (ret == -ERESTARTSYS)
1488 		ret = -EINTR;
1489 out:
1490 	if (ret < 0)
1491 		req_set_fail(req);
1492 	io_req_set_res(req, ret, 0);
1493 	return IOU_OK;
1494 }
1495 
1496 void io_netmsg_cache_free(struct io_cache_entry *entry)
1497 {
1498 	kfree(container_of(entry, struct io_async_msghdr, cache));
1499 }
1500 #endif
1501