// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

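/*
 * Allocate async msghdr state: when the ring lock is held, try to reuse an
 * entry from the per-ring netmsg cache; otherwise fall back to a regular
 * async_data allocation. Returns NULL if allocation fails.
 */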
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				  &iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

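/*
 * Prep for IORING_OP_SENDMSG and IORING_OP_SEND. The plain send variant may
 * carry an optional destination address in sqe->addr2/addr_len.
 */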
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

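/*
 * For multishot recvmsg the io_uring_recvmsg_out header, the (possibly
 * truncated) name and the control data all share the selected buffer, so
 * make sure their combined size does not overflow an int.
 */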
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			return false;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

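/*
 * IORING_OP_RECV: receive into a single user buffer (or a provided buffer),
 * without a user-supplied msghdr.
 */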
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

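/*
 * Plain user iovec: drop back from managed frags and let the generic
 * zerocopy iterator import attach the pages to the skb.
 */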
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

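/*
 * Accept a connection. For a multishot request, each accepted socket posts a
 * CQE flagged IORING_CQE_F_MORE and we loop to accept again; a plain request
 * posts a single completion.
 */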
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

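/*
 * Issue connect(2). A non-blocking attempt that returns -EINPROGRESS is
 * retried via poll; on the re-issue we read the final result with
 * sock_error() instead of calling connect again.
 */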
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif