// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	void __user			*msg_control;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths but multipoll may decide to post extra
	 * cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
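/*
 * Grab an async msghdr for this request: reuse one from the ring's
 * netmsg cache when the ring is locked (issue is not
 * IO_URING_F_UNLOCKED), otherwise fall back to a fresh allocation via
 * io_alloc_async_data().
 */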
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				  &iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
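/*
 * Parse the send SQE. IORING_OP_SEND may carry an optional destination
 * address in ->addr2/->addr_len; IORING_OP_SENDMSG must leave those
 * fields clear.
 */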
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
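/*
 * Non-msghdr send path. An optional destination address from the SQE is
 * copied into the on-stack sockaddr_storage (or taken from async data on
 * a retry) before the single-buffer payload is sent.
 */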
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
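/*
 * Reset per-iteration state so the next multishot cycle picks a fresh
 * provided buffer from the stashed buffer group.
 */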
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1U)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			/* Known not-empty or unknown state, retry */
			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
			    msg->msg_inq == -1U)
				return false;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_ISSUE_SKIP_COMPLETE;
			else
				*ret = -EAGAIN;
			return true;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}
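/*
 * On-stack staging area for multishot recvmsg: the io_uring_recvmsg_out
 * header and the received source address are laid out contiguously so a
 * single copy_to_user() can write both to the start of the selected
 * buffer.
 */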
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1U;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}
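/*
 * Non-msghdr receive path; supports provided buffers and multishot via
 * the same retry_multishot loop as io_recvmsg().
 */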
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1U;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
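/*
 * Prepare a zerocopy send: allocate the notification CQE carrier up
 * front and, for IORING_RECVSEND_FIXED_BUF, resolve the registered
 * buffer index.
 */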
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
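/*
 * Issue a zerocopy send. Fixed (registered) buffers are filled via
 * io_sg_from_iter() with managed frag references; plain user buffers are
 * accounted against the notif and go through the iovec-based filler.
 */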
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
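/*
 * msghdr variant of zerocopy send; mirrors io_sendmsg() but attaches the
 * notif's ubuf_info, so completion is signalled with a trailing
 * IORING_CQE_F_NOTIF CQE.
 */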
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
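/*
 * Accept a connection, installing the new file either into a normal fd
 * or a fixed file slot. In multishot mode each successful accept posts
 * an IORING_CQE_F_MORE CQE and loops back for the next connection.
 */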
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
		       IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
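/*
 * Connect with nonblocking retry handling: -EINPROGRESS is latched in
 * ->in_progress so later issues poll the socket error state instead of
 * reconnecting, and a single -ECONNABORTED is retried.
 */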
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif