// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths but multipoll may decide to post extra
	 * cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
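
/*
 * Note on caching: io_msg_alloc_async() below pairs with
 * io_netmsg_recycle() above. When the ring is locked
 * (!IO_URING_F_UNLOCKED), async msghdr state is served from the per-ring
 * netmsg_cache rather than allocated and freed on every request; entries
 * that cannot be returned to the cache simply fall back to the normal
 * async-data cleanup path.
 */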
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				  &iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
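
/*
 * Illustrative userspace sketch (not part of this file): submitting an
 * IORING_OP_SENDMSG request with liburing, assuming a connected socket
 * fd and a populated struct msghdr, might look like:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_sendmsg(sqe, fd, &msg, 0);
 *	io_uring_submit(&ring);
 *
 * io_sendmsg_prep() below then reads the msghdr pointer from sqe->addr
 * and the send flags from sqe->msg_flags.
 */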
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
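
/*
 * io_send() handles IORING_OP_SEND. Unlike send(2), the SQE may also
 * carry a destination address in sqe->addr2/sqe->addr_len (sendto(2)
 * semantics), which io_sendmsg_prep() above stashes in sr->addr. An
 * illustrative liburing sketch for the addressed form:
 *
 *	io_uring_prep_send(sqe, fd, buf, len, 0);
 *	io_uring_prep_send_set_addr(sqe, (struct sockaddr *)&dst, dst_len);
 */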
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}
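
/*
 * The compat path below mirrors __io_recvmsg_copy_hdr() for 32-bit
 * callers: the same BUFFER_SELECT restriction to at most one iovec and
 * the same multishot overflow check, but reading a compat_msghdr with
 * compat-sized iovecs instead.
 */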
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (mshot_finished)
		goto finish;

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				*ret, cflags | IORING_CQE_F_MORE)) {
		io_recv_prep_retry(req);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
			return false;
		if (issue_flags & IO_URING_F_MULTISHOT)
			*ret = IOU_ISSUE_SKIP_COMPLETE;
		else
			*ret = -EAGAIN;
		return true;
	}
	/* Otherwise stop multishot but use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
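
/*
 * For multishot recvmsg, each selected buffer is filled with:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control | payload
 *
 * io_recvmsg_prep_multishot() above reserves the header space and
 * io_recvmsg_multishot() below fills it in after the receive completes.
 * Userspace is expected to parse this layout itself; liburing provides
 * helpers such as io_uring_recvmsg_validate() and
 * io_uring_recvmsg_payload() for that purpose.
 */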
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}
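
/*
 * io_recv() handles IORING_OP_RECV. In multishot mode it relies on
 * provided buffers: sr->len must be 0 and every retry selects a fresh
 * buffer via io_buffer_select(). An illustrative liburing sketch,
 * assuming a buffer ring registered under group bgid:
 *
 *	io_uring_prep_recv_multishot(sqe, fd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 */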
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = len;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
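
/*
 * Zero-copy sends complete in two steps: the request CQE is posted with
 * IORING_CQE_F_MORE set, and a second notification CQE (same user_data,
 * IORING_CQE_F_NOTIF) is posted once the kernel is done with the source
 * buffer. With IORING_SEND_ZC_REPORT_USAGE, the notification's res field
 * is additionally meant to report whether the data ended up being copied
 * instead of sent zero-copy (see the uapi definition).
 */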
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
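
/*
 * An illustrative liburing sketch for IORING_OP_SEND_ZC; the buffer must
 * stay untouched until the matching IORING_CQE_F_NOTIF CQE is reaped:
 *
 *	io_uring_prep_send_zc(sqe, fd, buf, len, 0, 0);
 */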
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
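
/*
 * io_sendmsg_zc() is the msghdr-based zero-copy variant
 * (IORING_OP_SENDMSG_ZC). io_send_zc_prep() rejects
 * IORING_RECVSEND_FIXED_BUF for this opcode, so the iovec path via
 * io_sg_from_iter_iovec() is always taken here.
 */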
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
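
/*
 * Multishot accept keeps the request armed and posts one CQE per
 * accepted connection, each with IORING_CQE_F_MORE set; a terminating
 * CQE without IORING_CQE_F_MORE ends the series. An illustrative
 * liburing sketch:
 *
 *	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
 */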
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				ret, IORING_CQE_F_MORE))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
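
/*
 * Connect is issued nonblocking first; -EINPROGRESS and a first
 * -ECONNABORTED are treated as "arm poll and retry" rather than as final
 * errors (see io_connect() below). The sockaddr is copied into kernel
 * memory up front so that retries never have to touch user memory again.
 */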
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif