// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	unsigned nr_multishot_loops;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
 */
#define MULTISHOT_MAX_RETRY	32

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
	       !(req->flags & REQ_F_APOLL_MULTISHOT) ||
	       !req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

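/*
 * A short transfer is only worth retrying when MSG_WAITALL is set and the
 * socket type preserves a byte/record stream; for datagram sockets a retry
 * would operate on a new datagram rather than continue the old one.
 */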
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

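/*
 * Allocate an io_async_msghdr, preferring the per-ring netmsg cache when the
 * ring lock is held (i.e. not IO_URING_F_UNLOCKED); otherwise fall back to a
 * regular async data allocation.
 */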
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

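/*
 * Copy the on-stack msghdr state into async data so the request can be
 * retried later. Returns -EAGAIN so callers can simply return the result,
 * or -ENOMEM if allocating the async data fails.
 */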
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

#ifdef CONFIG_COMPAT
static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		return 0;
	}

	iomsg->free_iov = iomsg->fast_iov;
	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
			     UIO_FASTIOV, &iomsg->free_iov,
			     &iomsg->msg.msg_iter, true);
	if (unlikely(ret < 0))
		return ret;

	return 0;
}
#endif

static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
					   sizeof(*msg->msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		return 0;
	}

	iomsg->free_iov = iomsg->fast_iov;
	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
			     &iomsg->free_iov, &iomsg->msg.msg_iter, false);
	if (unlikely(ret < 0))
		return ret;

	return 0;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
		if (unlikely(ret))
			return ret;

		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
		sr->msg_control = iomsg->msg.msg_control_user;
		return ret;
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);

	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

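/*
 * For multishot recvmsg with provided buffers, the name and control data are
 * copied into the start of each selected buffer; validate that the combined
 * header size doesn't overflow and record the lengths for the later buffer
 * layout done by io_recvmsg_prep_multishot().
 */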
static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{
	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
	    (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
		int hdr;

		if (unlikely(namelen < 0))
			return -EOVERFLOW;
		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
				       namelen, &hdr))
			return -EOVERFLOW;
		if (check_add_overflow(hdr, controllen, &hdr))
			return -EOVERFLOW;

		iomsg->namelen = namelen;
		iomsg->controllen = controllen;
		return 0;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
		if (unlikely(ret))
			return ret;

		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
		if (unlikely(ret))
			return ret;

		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
						cmsg.msg_controllen);
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (unlikely(ret))
		return ret;

	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
					msg.msg_controllen);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	struct io_async_msghdr *iomsg;
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	iomsg = req->async_data;
	ret = io_recvmsg_copy_hdr(req, iomsg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	sr->nr_multishot_loops = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (mshot_finished)
		goto finish;

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				*ret, cflags | IORING_CQE_F_MORE)) {
		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;

		io_recv_prep_retry(req);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
				return false;
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
			mshot_retry_ret = IOU_REQUEUE;
		}
		if (issue_flags & IO_URING_F_MULTISHOT)
			*ret = mshot_retry_ret;
		else
			*ret = -EAGAIN;
		return true;
	}
	/* Otherwise stop multishot but use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

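/*
 * Carve up the selected buffer for multishot recvmsg: the
 * io_uring_recvmsg_out header, name and control data occupy the front of the
 * buffer, and the remaining space is what the payload is received into.
 */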
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	} else if (ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg, issue_flags);

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;
	bool mshot_finished;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = len;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	mshot_finished = ret <= 0;
	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

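/*
 * Fill skb frags directly from a bvec iterator without copying and without
 * taking page references (SKBFL_MANAGED_FRAG_REFS); used for zerocopy sends
 * from registered (fixed) buffers whose pages are already pinned.
 */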
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

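/*
 * Failure path for send/recv requests: report any partial progress as the
 * result, and keep IORING_CQE_F_MORE set for zerocopy sends that still owe
 * a notification CQE.
 */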
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				ret, IORING_CQE_F_MORE))
		goto retry;

	io_req_set_res(req, ret, 0);
	return IOU_STOP_MULTISHOT;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	if (unlikely(req->flags & REQ_F_FAIL)) {
		ret = -ECONNRESET;
		goto out;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif