// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

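/*
 * Shared prep/issue state for the send and receive request families
 * (IORING_OP_SEND[MSG], IORING_OP_RECV[MSG] and the zerocopy variants).
 * The union reflects that sqe->addr carries either a user_msghdr (for
 * the *msg variants) or a plain buffer pointer.
 */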
struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	void __user			*msg_control;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths, but multipoll may decide to post extra
	 * CQEs.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

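/*
 * IORING_OP_SHUTDOWN: wraps shutdown(2) on a socket, always punted to
 * io-wq (REQ_F_FORCE_ASYNC). Illustrative userspace usage, assuming
 * liburing (a sketch, not part of this file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 */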
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

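/*
 * Whether a short transfer is worth retrying: only with MSG_WAITALL,
 * and only on stream-type sockets where the remainder can simply be
 * resumed. Retrying on a datagram socket would break up what userspace
 * expects to be a single message.
 */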
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

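/*
 * Async msghdr state is recycled through a per-ring cache
 * (ctx->netmsg_cache) to avoid a kmalloc/kfree pair for every request
 * that needs to go async. Recycling requires the ring to be locked,
 * hence the IO_URING_F_UNLOCKED checks below.
 */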
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

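/*
 * Arm an -EAGAIN retry: stash the (possibly on-stack) msghdr into the
 * request's async data so the request can be re-issued later. If the
 * iterator still points at the on-stack fast_iov, it is re-based onto
 * the copy.
 */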
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			      struct sockaddr_storage *addr_storage,
			      unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

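/*
 * Prep for IORING_OP_SEND and IORING_OP_SENDMSG. For the plain send
 * variant, sqe->addr2/sqe->addr_len may carry an optional destination
 * address (sendto(2)-style); for sendmsg those fields must be unused.
 */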
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

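/*
 * For multishot recvmsg, the kernel-produced io_uring_recvmsg_out
 * header plus the name and control areas must all fit into the
 * provided buffer; reject setups where their combined size overflows
 * an int.
 */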
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			/* Known not-empty or unknown state, retry */
			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
			    msg->msg_inq == -1)
				return false;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_ISSUE_SKIP_COMPLETE;
			else
				*ret = -EAGAIN;
			return true;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

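/*
 * Multishot recvmsg lays out its result in the selected buffer as:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control | payload
 *
 * Carve the header area out of the selected buffer and point *buf/*len
 * at the remaining payload space.
 */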
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *      "fromlen shall refer to the value before truncation.."
	 *                      1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

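/*
 * Zerocopy send completes in two steps: the request itself posts a CQE
 * with IORING_CQE_F_MORE set, and the linked notification posts a
 * second CQE (IORING_CQE_F_NOTIF) once the kernel is done with the
 * user pages and they may safely be reused.
 */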
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

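/*
 * sg_from_iter callbacks for zerocopy sends. The iovec variant defers
 * to the generic zerocopy helper; io_sg_from_iter() below maps
 * fixed-buffer bvecs straight into skb frags with managed references
 * (SKBFL_MANAGED_FRAG_REFS), avoiding per-page refcounting.
 */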
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

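/*
 * IORING_OP_ACCEPT, optionally multishot: with IORING_ACCEPT_MULTISHOT
 * set, a single SQE keeps posting a CQE (flagged IORING_CQE_F_MORE) for
 * each accepted connection until an error occurs or the request is
 * cancelled.
 */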
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
		       IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

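/*
 * IORING_OP_CONNECT. A nonblocking connect(2) that returns -EINPROGRESS
 * is remembered via ->in_progress; the retry then just reads the
 * pending result with sock_error() rather than re-issuing the connect.
 */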
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif