// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "uring_cmd.h"

/*
 * Task-work trampoline: runs in task context and forwards to the
 * driver-supplied callback stashed in ioucmd->task_work_cb.
 */
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	ioucmd->task_work_cb(ioucmd);
}

/*
 * Queue @task_work_cb to be invoked from task context for this command.
 * Used by ->uring_cmd() providers that complete asynchronously but need
 * to finish processing in the submitting task's context.
 */
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);

/*
 * Stash the two extra 64-bit result fields used by 32-byte CQEs and mark
 * the request so completion posting knows they are valid.
 */
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	/* res2 is only representable if the ring was set up with big CQEs */
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	__io_req_complete(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

/*
 * Copy the command payload out of the SQE into req->async_data so the
 * request can be retried after the SQE itself has been reused.  The copy
 * size depends on whether the ring uses 128-byte SQEs.
 */
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	/* compile-time check that the PDU sizes match the uapi layout */
	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	return 0;
}

/*
 * Prep at submission time: reject SQE fields this opcode doesn't use and
 * record the command opcode plus a pointer to the in-SQE payload.  Note
 * ioucmd->cmd still points into the SQE here; it is only copied out by
 * io_uring_cmd_prep_async() if the request must go async.
 */
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->rw_flags || sqe->__pad1)
		return -EINVAL;
	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

/*
 * Issue path: hand the command to the file's ->uring_cmd() handler.
 *
 * Returns -EAGAIN to request a retry (after stashing the SQE payload in
 * async data), IOU_ISSUE_SKIP_COMPLETE when the handler took ownership of
 * completion (-EIOCBQUEUED), or the handler's result otherwise, with the
 * CQE result already set.
 */
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!req->file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	/* tell the handler how this ring was configured */
	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL)
		issue_flags |= IO_URING_F_IOPOLL;

	/* on retry, point at the stable copy made by io_uring_cmd_prep_async() */
	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}