Searched refs:issue_flags (Results 1 – 25 of 47) sorted by relevance

/openbmc/linux/io_uring/
net.h
35 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
40 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
42 int io_send(struct io_kiocb *req, unsigned int issue_flags);
47 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
48 int io_recv(struct io_kiocb *req, unsigned int issue_flags);
53 int io_accept(struct io_kiocb *req, unsigned int issue_flags);
56 int io_socket(struct io_kiocb *req, unsigned int issue_flags);
60 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
62 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
63 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
net.c
82 unsigned int issue_flags) in io_check_multishot() argument
146 unsigned int issue_flags) in io_msg_alloc_async() argument
179 unsigned int issue_flags) in io_setup_async_msg() argument
338 unsigned int issue_flags) in io_setup_async_addr() argument
429 if (issue_flags & IO_URING_F_NONBLOCK) in io_sendmsg()
454 io_netmsg_recycle(req, issue_flags); in io_sendmsg()
677 unsigned issue_flags) in io_recv_finish() argument
855 io_kbuf_recycle(req, issue_flags); in io_recvmsg()
907 io_kbuf_recycle(req, issue_flags); in io_recvmsg()
1009 io_kbuf_recycle(req, issue_flags); in io_recv()
[all …]
openclose.c
109 int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
123 if (issue_flags & IO_URING_F_NONBLOCK) { in io_openat2()
148 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) in io_openat2()
153 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) in io_openat2()
159 ret = io_fixed_fd_install(req, issue_flags, file, in io_openat2()
170 int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
172 return io_openat2(req, issue_flags); in io_openat()
188 io_ring_submit_lock(ctx, issue_flags); in __io_close_fixed()
190 io_ring_submit_unlock(ctx, issue_flags); in __io_close_fixed()
219 int io_close(struct io_kiocb *req, unsigned int issue_flags) in io_close() argument
[all …]
kbuf.h
45 unsigned int issue_flags);
49 int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
52 int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
59 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
61 void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
101 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle() argument
104 io_kbuf_recycle_legacy(req, issue_flags); in io_kbuf_recycle()
139 unsigned issue_flags) in io_put_kbuf() argument
144 return __io_put_kbuf(req, issue_flags); in io_put_kbuf()
msg_ring.c
42 unsigned int issue_flags) in io_double_lock_ctx() argument
49 if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_double_lock_ctx()
148 if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) in io_msg_ring_data()
167 io_ring_submit_lock(ctx, issue_flags); in io_msg_grab_file()
174 io_ring_submit_unlock(ctx, issue_flags); in io_msg_grab_file()
185 if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) in io_msg_install_complete()
237 src_file = io_msg_grab_file(req, issue_flags); in io_msg_send_fd()
246 return io_msg_install_complete(req, issue_flags); in io_msg_send_fd()
269 int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) in io_msg_ring() argument
280 ret = io_msg_ring_data(req, issue_flags); in io_msg_ring()
[all …]
fs.h
4 int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
8 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
12 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
16 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags);
19 int io_linkat(struct io_kiocb *req, unsigned int issue_flags);
uring_cmd.c
19 unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED; in io_uring_cmd_work() local
21 ioucmd->task_work_cb(ioucmd, issue_flags); in io_uring_cmd_work()
55 unsigned issue_flags) in io_uring_cmd_done() argument
70 .locked = !(issue_flags & IO_URING_F_UNLOCKED), in io_uring_cmd_done()
113 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) in io_uring_cmd() argument
128 issue_flags |= IO_URING_F_SQE128; in io_uring_cmd()
130 issue_flags |= IO_URING_F_CQE32; in io_uring_cmd()
134 issue_flags |= IO_URING_F_IOPOLL; in io_uring_cmd()
139 ret = file->f_op->uring_cmd(ioucmd, issue_flags); in io_uring_cmd()
168 int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) in io_uring_cmd_sock() argument
openclose.h
3 int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
7 int io_openat(struct io_kiocb *req, unsigned int issue_flags);
11 int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
14 int io_close(struct io_kiocb *req, unsigned int issue_flags);
xattr.h
6 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
9 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
12 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
15 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
xattr.c
108 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fgetxattr() argument
113 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fgetxattr()
123 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) in io_getxattr() argument
130 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_getxattr()
206 static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags, in __io_setxattr() argument
221 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fsetxattr() argument
225 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsetxattr()
227 ret = __io_setxattr(req, issue_flags, &req->file->f_path); in io_fsetxattr()
232 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) in io_setxattr() argument
239 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_setxattr()
[all …]
poll.h
28 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
31 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
35 unsigned issue_flags);
36 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
sync.c
40 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
46 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_sync_file_range()
70 int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
77 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsync()
99 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
105 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fallocate()
splice.c
48 int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
56 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_tee()
59 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags); in io_tee()
88 int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
97 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_splice()
100 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags); in io_splice()
fs.c
81 int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
86 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_renameat()
130 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
135 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_unlinkat()
177 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) in io_mkdirat() argument
182 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_mkdirat()
227 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_symlinkat() argument
232 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_symlinkat()
272 int io_linkat(struct io_kiocb *req, unsigned int issue_flags) in io_linkat() argument
277 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_linkat()
kbuf.c
81 io_ring_submit_lock(ctx, issue_flags); in io_kbuf_recycle_legacy()
89 io_ring_submit_unlock(ctx, issue_flags); in io_kbuf_recycle_legacy()
146 unsigned int issue_flags) in io_ring_buffer_select() argument
171 if (issue_flags & IO_URING_F_UNLOCKED || in io_ring_buffer_select()
190 unsigned int issue_flags) in io_buffer_select() argument
196 io_ring_submit_lock(req->ctx, issue_flags); in io_buffer_select()
205 io_ring_submit_unlock(req->ctx, issue_flags); in io_buffer_select()
330 io_ring_submit_lock(ctx, issue_flags); in io_remove_buffers()
340 io_ring_submit_unlock(ctx, issue_flags); in io_remove_buffers()
459 io_ring_submit_lock(ctx, issue_flags); in io_provide_buffers()
[all …]
sync.h
4 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags);
7 int io_fsync(struct io_kiocb *req, unsigned int issue_flags);
9 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags);
cancel.c
103 unsigned issue_flags) in io_try_cancel() argument
118 ret = io_poll_cancel(ctx, cd, issue_flags); in io_try_cancel()
158 unsigned int issue_flags) in __io_async_cancel() argument
166 ret = io_try_cancel(tctx, cd, issue_flags); in __io_async_cancel()
175 io_ring_submit_lock(ctx, issue_flags); in __io_async_cancel()
187 io_ring_submit_unlock(ctx, issue_flags); in __io_async_cancel()
191 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
209 issue_flags); in io_async_cancel()
220 ret = __io_async_cancel(&cd, tctx, issue_flags); in io_async_cancel()
advise.c
49 int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
55 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_madvise()
92 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
97 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK && io_fadvise_force_async(fa)); in io_fadvise()
poll.c
575 unsigned issue_flags) in __io_arm_poll_handler() argument
600 ipt->owning = issue_flags & IO_URING_F_UNLOCKED; in __io_arm_poll_handler()
604 if (issue_flags & IO_URING_F_UNLOCKED) in __io_arm_poll_handler()
671 unsigned issue_flags) in io_req_alloc_apoll() argument
733 apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
740 io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
875 unsigned issue_flags) in io_poll_cancel() argument
883 io_ring_submit_lock(ctx, issue_flags); in io_poll_cancel()
885 io_ring_submit_unlock(ctx, issue_flags); in io_poll_cancel()
985 io_ring_submit_lock(ctx, issue_flags); in io_poll_remove()
[all …]
splice.h
4 int io_tee(struct io_kiocb *req, unsigned int issue_flags);
7 int io_splice(struct io_kiocb *req, unsigned int issue_flags);
advise.h
4 int io_madvise(struct io_kiocb *req, unsigned int issue_flags);
7 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags);
rsrc.h
107 unsigned int issue_flags) in io_req_set_rsrc_node() argument
110 io_ring_submit_lock(ctx, issue_flags); in io_req_set_rsrc_node()
116 io_ring_submit_unlock(ctx, issue_flags); in io_req_set_rsrc_node()
134 int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
cancel.h
17 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
20 unsigned int issue_flags);
/openbmc/linux/include/linux/
io_uring.h
48 unsigned issue_flags);
83 int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
92 ssize_t ret2, unsigned issue_flags) in io_uring_cmd_done() argument
117 unsigned int issue_flags) in io_uring_cmd_sock() argument
/openbmc/linux/drivers/nvme/host/
ioctl.c
472 unsigned issue_flags) in nvme_uring_task_meta_cb() argument
497 unsigned issue_flags) in nvme_uring_task_cb() argument
604 if (issue_flags & IO_URING_F_NONBLOCK) { in nvme_uring_cmd_io()
608 if (issue_flags & IO_URING_F_IOPOLL) in nvme_uring_cmd_io()
753 unsigned int issue_flags) in nvme_ns_uring_cmd() argument
760 ret = nvme_uring_cmd_checks(issue_flags); in nvme_ns_uring_cmd()
783 return nvme_ns_uring_cmd(ns, ioucmd, issue_flags); in nvme_ns_chr_uring_cmd()
878 unsigned int issue_flags) in nvme_ns_head_chr_uring_cmd() argument
887 ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags); in nvme_ns_head_chr_uring_cmd()
899 if (issue_flags & IO_URING_F_IOPOLL) in nvme_dev_uring_cmd()
[all …]
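
Taken together, the hits above show the common shape of an io_uring opcode handler: every handler receives issue_flags, checks IO_URING_F_NONBLOCK to decide whether it may block during this issue attempt, and passes the flags down to io_ring_submit_lock()/io_ring_submit_unlock() so ctx->uring_lock is taken only when IO_URING_F_UNLOCKED indicates the issuing context does not already hold it. The sketch below is illustrative only, not code from the tree: io_example_op() and io_do_thing() are hypothetical names, and it assumes the io_uring internal headers for struct io_kiocb, struct io_ring_ctx, the IO_URING_F_* flags, and the submit-lock helpers, all of which appear in the matches above.

    /*
     * Illustrative sketch (not from the tree) of issue_flags handling in an
     * io_uring opcode handler; io_example_op() and io_do_thing() are
     * hypothetical placeholders.
     */
    int io_example_op(struct io_kiocb *req, unsigned int issue_flags)
    {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        /* Non-blocking issue attempt: ask the core to retry from io-wq. */
        if (issue_flags & IO_URING_F_NONBLOCK)
            return -EAGAIN;

        /*
         * io_ring_submit_lock() acquires ctx->uring_lock only when
         * IO_URING_F_UNLOCKED is set, i.e. the issuing context does not
         * already hold it (see the kbuf.c, msg_ring.c and cancel.c hits).
         */
        io_ring_submit_lock(ctx, issue_flags);
        ret = io_do_thing(req);    /* hypothetical blocking work */
        io_ring_submit_unlock(ctx, issue_flags);

        return ret;
    }

Handlers that always need a blocking context (the fs.c, sync.c, xattr.c, splice.c and advise.c hits) take the other path visible above: instead of returning -EAGAIN they assert WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK), relying on being issued only from the worker path.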
