// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

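/*
 * Allocate async msghdr state for a request. When the ring is locked
 * (!IO_URING_F_UNLOCKED), reuse an entry from the per-ring netmsg_cache
 * if one is available; otherwise fall back to a plain
 * io_alloc_async_data() allocation.
 */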
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

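/*
 * Copy the on-stack msghdr into per-request async data so the request can
 * be retried later (e.g. from io-wq or task work). Returns -EAGAIN to
 * trigger that retry, or -ENOMEM if no async state could be allocated.
 */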
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				  &iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

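/*
 * Preserve an already-resolved destination address across a retry by
 * copying it into per-request async data. Like io_setup_async_msg(),
 * returns -EAGAIN so the caller re-issues the request later.
 */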
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

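/*
 * The multishot recvmsg header (io_uring_recvmsg_out + name + control) is
 * sized with int arithmetic; reject values that would overflow it.
 */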
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

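/*
 * Reset per-issue receive state before re-arming a multishot receive:
 * clear the partial IO count, take the length from the next provided
 * buffer, and restore the saved buffer group.
 */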
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
					*ret, cflags | IORING_CQE_F_MORE)) {
			io_recv_prep_retry(req);
			/* Known not-empty or unknown state, retry */
			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
			    msg->msg_inq == -1)
				return false;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_ISSUE_SKIP_COMPLETE;
			else
				*ret = -EAGAIN;
			return true;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

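/*
 * Do one receive for a multishot recvmsg and lay the result out in the
 * buffer prepared by io_recvmsg_prep_multishot(): copy out the
 * io_uring_recvmsg_out header plus the (possibly truncated) source
 * address, with control data and payload already written in place by
 * sock_recvmsg(). Returns the total number of buffer bytes consumed,
 * or a negative error.
 */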
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = len;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

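/*
 * Zerocopy send: map the pages referenced by the bvec iterator straight
 * into the skb's frag list instead of copying. Falls back to
 * __zerocopy_sg_from_iter() if the skb already carries frags we don't
 * manage, and returns -EMSGSIZE once MAX_SKB_FRAGS is reached with data
 * still left in the iterator.
 */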
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

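/*
 * Failure completion for send/recv requests: report any partial progress
 * as the result, and keep IORING_CQE_F_MORE set for zerocopy sends that
 * still owe the application a notification CQE.
 */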
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				ret, IORING_CQE_F_MORE))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

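/* Free a cached io_async_msghdr when the netmsg cache itself is torn down. */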
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif