// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "uring_cmd.h"

/* task_work callback: run the driver-supplied completion handler in task context */
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	ioucmd->task_work_cb(ioucmd);
}

void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	__io_req_complete(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->rw_flags || sqe->__pad1)
		return -EINVAL;
	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!req->file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	/* tell the driver which ring features apply to this request */
	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL)
		issue_flags |= IO_URING_F_IOPOLL;

	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			/* stash the SQE payload; the original SQE may be reused before retry */
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}
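
/*
 * Illustrative sketch only (not part of the upstream file): one way a
 * character-device driver might wire up ->uring_cmd against the helpers
 * above. The "mydrv_*" names and MYDRV_URING_NOP opcode are hypothetical;
 * only file_operations->uring_cmd, io_uring_cmd_done() and
 * io_uring_cmd_complete_in_task() are real interfaces.
 *
 *	// runs in the submitting task's context via task_work
 *	static void mydrv_cmd_complete(struct io_uring_cmd *ioucmd)
 *	{
 *		io_uring_cmd_done(ioucmd, 0, 0);
 *	}
 *
 *	static int mydrv_uring_cmd(struct io_uring_cmd *ioucmd,
 *				   unsigned int issue_flags)
 *	{
 *		switch (ioucmd->cmd_op) {
 *		case MYDRV_URING_NOP:		// hypothetical opcode
 *			// Kick off async work and return -EIOCBQUEUED now;
 *			// when the work finishes (e.g. from IRQ context),
 *			// post the completion from task context with:
 *			//   io_uring_cmd_complete_in_task(ioucmd,
 *			//				     mydrv_cmd_complete);
 *			return -EIOCBQUEUED;
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.uring_cmd	= mydrv_uring_cmd,
 *	};
 */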