/openbmc/linux/io_uring/

fs.c
   50  int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   55          if (sqe->buf_index || sqe->splice_fd_in)
   60          ren->old_dfd = READ_ONCE(sqe->fd);
   61          oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
   62          newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
   63          ren->new_dfd = READ_ONCE(sqe->len);
   64          ren->flags = READ_ONCE(sqe->rename_flags);
  104  int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  109          if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
  114          un->dfd = READ_ONCE(sqe->fd);
       [all …]
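
A submission-side sketch for comparison, assuming liburing and an initialized ring (function and variable names here are illustrative, not from the indexed source): io_renameat_prep() reads old_dfd from sqe->fd, the two path pointers from sqe->addr and sqe->addr2, new_dfd from sqe->len, and flags from sqe->rename_flags, i.e. the fields the helper below fills.

        /* Sketch: async renameat2() through io_uring. */
        #include <fcntl.h>
        #include <liburing.h>

        int rename_async(struct io_uring *ring, const char *oldpath,
                         const char *newpath)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                struct io_uring_cqe *cqe;
                int ret;

                if (!sqe)
                        return -1;      /* SQ ring full; submit and retry in real code */
                /* Mirrors io_renameat_prep(): fd, addr, len, addr2, rename_flags. */
                io_uring_prep_renameat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, 0);
                io_uring_submit(ring);
                ret = io_uring_wait_cqe(ring, &cqe);
                if (ret < 0)
                        return ret;
                ret = cqe->res;         /* 0 on success, -errno on failure */
                io_uring_cqe_seen(ring, cqe);
                return ret;
        }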

sync.c
   25  int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   29          if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
   32          sync->off = READ_ONCE(sqe->off);
   33          sync->len = READ_ONCE(sqe->len);
   34          sync->flags = READ_ONCE(sqe->sync_range_flags);
   53  int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   57          if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
   60          sync->flags = READ_ONCE(sqe->fsync_flags);
   64          sync->off = READ_ONCE(sqe->off);
   65          sync->len = READ_ONCE(sqe->len);
       [all …]
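
Both prep functions above take the target range from sqe->off and sqe->len, with the op-specific flags in sync_range_flags or fsync_flags. A minimal liburing sketch (assumes an initialized ring and an open fd):

        /* Sketch: async fdatasync; io_fsync_prep() reads back the
         * fsync_flags, off and len fields this helper sets. */
        #include <liburing.h>

        int fsync_async(struct io_uring *ring, int fd)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                struct io_uring_cqe *cqe;
                int ret;

                if (!sqe)
                        return -1;      /* ring full; submit and retry in real code */
                io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
                io_uring_submit(ring);
                ret = io_uring_wait_cqe(ring, &cqe);
                if (ret < 0)
                        return ret;
                ret = cqe->res;         /* 0 on success, -errno on failure */
                io_uring_cqe_seen(ring, cqe);
                return ret;
        }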

advise.c
   31  int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   36          if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
   39          ma->addr = READ_ONCE(sqe->addr);
   40          ma->len = READ_ONCE(sqe->len);
   41          ma->advice = READ_ONCE(sqe->fadvise_advice);
   77  int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   81          if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
   84          fa->offset = READ_ONCE(sqe->off);
   85          fa->len = READ_ONCE(sqe->len);
   86          fa->advice = READ_ONCE(sqe->fadvise_advice);
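
Both advise ops share the fadvise_advice SQE field, as the two prep functions show; madvise takes its range from addr/len, fadvise from off/len. A hedged sketch of the matching prep calls (assumes liburing and two SQEs already obtained):

        /* Sketch: queue an madvise and an fadvise hint back to back. */
        #include <fcntl.h>      /* POSIX_FADV_* */
        #include <sys/mman.h>   /* MADV_* */
        #include <liburing.h>

        void queue_advice(struct io_uring_sqe *sqe_m, struct io_uring_sqe *sqe_f,
                          void *buf, size_t buflen, int fd, off_t filelen)
        {
                /* sqe->addr = buf, sqe->len = buflen, fadvise_advice = advice */
                io_uring_prep_madvise(sqe_m, buf, buflen, MADV_WILLNEED);
                /* sqe->off = 0, sqe->len = filelen, fadvise_advice = advice */
                io_uring_prep_fadvise(sqe_f, fd, 0, filelen, POSIX_FADV_SEQUENTIAL);
        }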

openclose.c
   45  static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   51          if (unlikely(sqe->buf_index))
   60          open->dfd = READ_ONCE(sqe->fd);
   61          fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
   69          open->file_slot = READ_ONCE(sqe->file_index);
   80  int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   83          u64 mode = READ_ONCE(sqe->len);
   84          u64 flags = READ_ONCE(sqe->open_flags);
   87          return __io_openat_prep(req, sqe);
   90  int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
       [all …]
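
__io_openat_prep() maps sqe->fd to the directory fd and sqe->addr to the pathname; for plain openat, sqe->len carries the mode and sqe->open_flags the flags, while file_slot enables fixed-file installs. A minimal sketch of the common case, assuming liburing:

        /* Sketch: queue an async open; the new fd arrives in cqe->res. */
        #include <fcntl.h>
        #include <liburing.h>

        void queue_open(struct io_uring *ring, const char *path)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                if (!sqe)
                        return;         /* ring full; submit and retry in real code */
                io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
        }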

xattr.c
   45                            const struct io_uring_sqe *sqe)
   56          name = u64_to_user_ptr(READ_ONCE(sqe->addr));
   57          ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
   58          ix->ctx.size = READ_ONCE(sqe->len);
   59          ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
   82  int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   84          return __io_getxattr_prep(req, sqe);
   87  int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   93          ret = __io_getxattr_prep(req, sqe);
   97          path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
       [all …]
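
A raw fill for the fd-based variant, mirroring the fields __io_getxattr_prep() consumes (name in addr, destination buffer in addr2, buffer size in len). This is a hand-rolled sketch; recent liburing also ships io_uring_prep_fgetxattr() for the same job:

        /* Sketch: IORING_OP_FGETXATTR via a manually filled SQE. */
        #include <string.h>
        #include <liburing.h>

        void queue_fgetxattr(struct io_uring_sqe *sqe, int fd,
                             const char *name, char *value, unsigned int size)
        {
                memset(sqe, 0, sizeof(*sqe));
                sqe->opcode = IORING_OP_FGETXATTR;
                sqe->fd = fd;
                sqe->addr = (unsigned long) name;       /* attribute name */
                sqe->addr2 = (unsigned long) value;     /* destination buffer */
                sqe->len = size;                        /* buffer size */
                sqe->xattr_flags = 0;
        }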

splice.c
   27                            const struct io_uring_sqe *sqe)
   32          sp->len = READ_ONCE(sqe->len);
   33          sp->flags = READ_ONCE(sqe->splice_flags);
   36          sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
   41  int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   43          if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
   45          return __io_splice_prep(req, sqe);
   79  int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   83          sp->off_in = READ_ONCE(sqe->splice_off_in);
   84          sp->off_out = READ_ONCE(sqe->off);
       [all …]
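
io_splice_prep() takes the input offset from the dedicated splice_off_in field and the output offset from the shared off field, with the input fd in splice_fd_in. A hedged liburing sketch; -1 as an offset means "use the file's implicit offset", which is mandatory for pipes:

        /* Sketch: splice nbytes from a pipe into a file through the ring. */
        #include <liburing.h>

        void queue_splice(struct io_uring_sqe *sqe, int pipe_rd, int file_fd,
                          unsigned int nbytes)
        {
                io_uring_prep_splice(sqe, pipe_rd, -1, file_fd, 0, nbytes, 0);
        }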

statx.c
   23  int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   28          if (sqe->buf_index || sqe->splice_fd_in)
   33          sx->dfd = READ_ONCE(sqe->fd);
   34          sx->mask = READ_ONCE(sqe->len);
   35          path = u64_to_user_ptr(READ_ONCE(sqe->addr));
   36          sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
   37          sx->flags = READ_ONCE(sqe->statx_flags);
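
io_statx_prep() shows the statx arguments spread over the SQE: dfd in fd, mask in len, the path in addr, the struct statx pointer in addr2, and flags in statx_flags. A minimal sketch, assuming liburing:

        /* Sketch: async statx; results land in *stx after completion. */
        #include <fcntl.h>
        #include <linux/stat.h>         /* struct statx, STATX_* */
        #include <liburing.h>

        void queue_statx(struct io_uring_sqe *sqe, const char *path,
                         struct statx *stx)
        {
                /* stx and path must stay alive until the request completes */
                io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_BASIC_STATS, stx);
        }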

epoll.c
   24  int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   28          if (sqe->buf_index || sqe->splice_fd_in)
   31          epoll->epfd = READ_ONCE(sqe->fd);
   32          epoll->op = READ_ONCE(sqe->len);
   33          epoll->fd = READ_ONCE(sqe->off);
   38          ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
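
io_epoll_ctl_prep() packs the epoll_ctl() triple into the SQE: epfd in fd, op in len, the target fd in off, and the epoll_event pointer in addr. A hedged sketch, assuming liburing:

        /* Sketch: EPOLL_CTL_ADD submitted through the ring.  The event
         * struct must stay alive until the request completes, hence
         * static here (one in-flight request at a time in this sketch). */
        #include <sys/epoll.h>
        #include <liburing.h>

        void queue_epoll_add(struct io_uring_sqe *sqe, int epfd, int fd)
        {
                static struct epoll_event ev;

                ev.events = EPOLLIN;
                ev.data.fd = fd;
                io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
        }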

net.c
   94  int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   98          if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
   99                       sqe->buf_index || sqe->splice_fd_in))
  102          shutdown->how = READ_ONCE(sqe->len);
  373  int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  378                  if (READ_ONCE(sqe->__pad3[0]))
  380                  sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
  381                  sr->addr_len = READ_ONCE(sqe->addr_len);
  382          } else if (sqe->addr2 || sqe->file_index) {
  386          sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
       [all …]
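
io_sendmsg_prep() reads the user msghdr pointer from sqe->addr (sr->umsg); addr2/addr_len are only consumed by the opcode variants that carry a destination address, and must otherwise be zero. A minimal sendmsg sketch, assuming liburing:

        /* Sketch: async sendmsg.  msg (and iov) must stay alive until the
         * request completes; static limits this sketch to one in flight. */
        #include <string.h>
        #include <sys/socket.h>
        #include <sys/uio.h>
        #include <liburing.h>

        void queue_sendmsg(struct io_uring_sqe *sqe, int sockfd,
                           struct iovec *iov, size_t iovcnt)
        {
                static struct msghdr msg;

                memset(&msg, 0, sizeof(msg));
                msg.msg_iov = iov;
                msg.msg_iovlen = iovcnt;
                io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
        }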

net.h
   34  int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   39  int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   46  int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   52  int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   55  int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   59  int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   64  int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

fs.h
    3  int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
    7  int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   11  int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   15  int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   18  int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

uring_cmd.c
   81          memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
   82          ioucmd->sqe = req->async_data;
   86  int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   90          if (sqe->__pad1)
   93          ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
  101          req->buf_index = READ_ONCE(sqe->buf_index);
  108          ioucmd->sqe = sqe;
  109          ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
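
URING_CMD payloads are driver-defined, so there is no universal prep helper; io_uring_cmd_prep() itself only consumes the generic header fields seen above. A hand-rolled, hedged sketch (a ring created with IORING_SETUP_SQE128 gives the command 80 payload bytes instead of 16):

        /* Sketch: generic IORING_OP_URING_CMD fill; cmd_op selects the
         * driver-specific operation and the payload layout is whatever
         * the target driver expects. */
        #include <string.h>
        #include <liburing.h>

        void queue_uring_cmd(struct io_uring_sqe *sqe, int fd, __u32 cmd_op,
                             const void *payload, size_t payload_len)
        {
                memset(sqe, 0, sizeof(*sqe));
                sqe->opcode = IORING_OP_URING_CMD;
                sqe->fd = fd;
                sqe->cmd_op = cmd_op;
                memcpy(sqe->cmd, payload, payload_len);
        }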

fdinfo.c
   93          struct io_uring_sqe *sqe;
  101                  sqe = &ctx->sq_sqes[sq_idx << sq_shift];
  105                             sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
  106                             sqe->flags, (unsigned long long) sqe->off,
  107                             (unsigned long long) sqe->addr, sqe->rw_flags,
  108                             sqe->buf_index, sqe->user_data);
  110                  u64 *sqeb = (void *) (sqe + 1);

xattr.h
    5  int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
    8  int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   11  int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   14  int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

timeout.c
  425  int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  431          if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
  435          tr->addr = READ_ONCE(sqe->addr);
  436          tr->flags = READ_ONCE(sqe->timeout_flags);
  444                  if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
  495                               const struct io_uring_sqe *sqe,
  501          u32 off = READ_ONCE(sqe->off);
  503          if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
  507          flags = READ_ONCE(sqe->timeout_flags);
  540          if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
       [all …]
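
__io_timeout_prep() insists on sqe->len == 1 (one timespec) and takes the timespec pointer from sqe->addr, while off doubles as a completion count. liburing hides those conventions behind its helper; a minimal sketch:

        /* Sketch: arm a 5 s relative timeout.  count == 0 makes it a pure
         * timer rather than "complete after N other CQEs". */
        #include <liburing.h>

        void queue_timeout(struct io_uring_sqe *sqe)
        {
                static struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

                io_uring_prep_timeout(sqe, &ts, 0, 0);
        }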

msg_ring.c
  249  int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  253          if (unlikely(sqe->buf_index || sqe->personality))
  257          msg->user_data = READ_ONCE(sqe->off);
  258          msg->len = READ_ONCE(sqe->len);
  259          msg->cmd = READ_ONCE(sqe->addr);
  260          msg->src_fd = READ_ONCE(sqe->addr3);
  261          msg->dst_fd = READ_ONCE(sqe->file_index);
  262          msg->flags = READ_ONCE(sqe->msg_ring_flags);
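
io_msg_ring_prep() reads the destination CQE's payload from off (user_data) and len (res), so one ring can wake another with an arbitrary token. A hedged sketch, assuming liburing:

        /* Sketch: post a CQE into another ring identified by its fd; the
         * receiver sees token in cqe->user_data and 0 in cqe->res. */
        #include <liburing.h>

        void queue_ring_msg(struct io_uring_sqe *sqe, int target_ring_fd,
                            __u64 token)
        {
                io_uring_prep_msg_ring(sqe, target_ring_fd, 0, token, 0);
        }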

/openbmc/linux/drivers/infiniband/sw/siw/

siw_qp.c
  278          wqe->sqe.flags = 0;
  279          wqe->sqe.num_sge = 1;
  280          wqe->sqe.sge[0].length = 0;
  281          wqe->sqe.sge[0].laddr = 0;
  282          wqe->sqe.sge[0].lkey = 0;
  287          wqe->sqe.rkey = 1;
  288          wqe->sqe.raddr = 0;
  292          wqe->sqe.opcode = SIW_OP_WRITE;
  296          wqe->sqe.opcode = SIW_OP_READ;
  303          siw_read_to_orq(rreq, &wqe->sqe);
       [all …]

siw_qp_tx.c
   43          struct siw_sge *sge = &wqe->sqe.sge[0];
   46          if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
   53                  memcpy(paddr, &wqe->sqe.sge[1], bytes);
  136          c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
  138                  cpu_to_be64(wqe->sqe.sge[0].laddr);
  139          c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
  140          c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
  141          c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
  184          c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
  196          c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
       [all …]

siw_verbs.c
  644                            struct siw_sqe *sqe)
  647          void *kbuf = &sqe->sge[1];
  650          sqe->sge[0].laddr = (uintptr_t)kbuf;
  651          sqe->sge[0].lkey = 0;
  669          sqe->sge[0].length = max(bytes, 0);
  670          sqe->num_sge = bytes > 0 ? 1 : 0;
  682          struct siw_sqe sqe = {};
  686                  sqe.opcode = SIW_OP_WRITE;
  689                  sqe.opcode = SIW_OP_READ;
  692                  sqe.opcode = SIW_OP_READ_LOCAL_INV;
       [all …]

/openbmc/linux/drivers/crypto/hisilicon/zip/

zip_crypto.c
  101          void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
  102          void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
  103          void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
  104          void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
  105          void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
  106          void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
  107          u32 (*get_tag)(struct hisi_zip_sqe *sqe);
  108          u32 (*get_status)(struct hisi_zip_sqe *sqe);
  109          u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
  263  static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
       [all …]

/openbmc/qemu/util/

fdmon-io_uring.c
   83          struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
   86          if (likely(sqe)) {
   87                  return sqe;
   96          sqe = io_uring_get_sqe(ring);
   97          assert(sqe);
   98          return sqe;
  171          struct io_uring_sqe *sqe = get_sqe(ctx);
  174          io_uring_prep_poll_add(sqe, node->pfd.fd, events);
  175          io_uring_sqe_set_data(sqe, node);
  180          struct io_uring_sqe *sqe = get_sqe(ctx);
       [all …]
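
QEMU's get_sqe() shows the standard recovery when the submission ring is momentarily full: flush the queued entries, then grab a slot again. The same pattern in a standalone, hedged sketch:

        /* Sketch: io_uring_get_sqe() returns NULL only while unsubmitted
         * entries occupy the whole SQ ring, so one submit frees a slot. */
        #include <assert.h>
        #include <liburing.h>

        static struct io_uring_sqe *get_sqe_retry(struct io_uring *ring)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                if (sqe)
                        return sqe;
                io_uring_submit(ring);          /* hand pending SQEs to the kernel */
                sqe = io_uring_get_sqe(ring);
                assert(sqe);                    /* must succeed after the flush */
                return sqe;
        }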

/openbmc/linux/tools/testing/selftests/net/

io_uring_zerocopy_tx.c
  271  static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
  274          memset(sqe, 0, sizeof(*sqe));
  275          sqe->opcode = (__u8) IORING_OP_SEND;
  276          sqe->fd = sockfd;
  277          sqe->addr = (unsigned long) buf;
  278          sqe->len = len;
  279          sqe->msg_flags = (__u32) flags;
  282  static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd,
  286          io_uring_prep_send(sqe, sockfd, buf, len, flags);
  287          sqe->opcode = (__u8) IORING_OP_SEND_ZC;
       [all …]
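
The zerocopy variant differs mainly on the completion side: the immediate CQE carries IORING_CQE_F_MORE, and a second notification CQE flagged IORING_CQE_F_NOTIF arrives once the kernel no longer references the buffer. A hedged consumer sketch, assuming liburing on the completion path:

        /* Sketch: reap both CQEs of one IORING_OP_SEND_ZC request; the
         * send buffer may only be reused after the F_NOTIF completion. */
        #include <liburing.h>

        int reap_sendzc(struct io_uring *ring)
        {
                struct io_uring_cqe *cqe;
                unsigned flags;
                int sent = -1;

                for (;;) {
                        if (io_uring_wait_cqe(ring, &cqe))
                                return -1;
                        flags = cqe->flags;
                        if (!(flags & IORING_CQE_F_NOTIF))
                                sent = cqe->res;        /* bytes sent, or -errno */
                        io_uring_cqe_seen(ring, cqe);
                        if (flags & IORING_CQE_F_NOTIF)
                                break;                  /* buffer reusable now */
                        if (!(flags & IORING_CQE_F_MORE))
                                break;                  /* no notification coming */
                }
                return sent;
        }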

/openbmc/linux/drivers/net/ethernet/qlogic/qed/

qed_nvmetcp_fw_funcs.c
   68          if (!task_params->sqe)
   71          memset(task_params->sqe, 0, sizeof(*task_params->sqe));
   72          task_params->sqe->task_id = cpu_to_le16(task_params->itid);
   79          SET_FIELD(task_params->sqe->contlen_cdbsize,
   81          SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
   94          SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
   95          SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
   99          SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
  101          SET_FIELD(task_params->sqe->contlen_cdbsize,
  106          SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
       [all …]

/openbmc/linux/include/trace/events/

io_uring.h
  498          TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
  500          TP_ARGS(sqe, req, error),
  520          __string( op_str, io_uring_get_opcode(sqe->opcode) )
  526                  __entry->user_data = sqe->user_data;
  527                  __entry->opcode = sqe->opcode;
  528                  __entry->flags = sqe->flags;
  529                  __entry->ioprio = sqe->ioprio;
  530                  __entry->off = sqe->off;
  531                  __entry->addr = sqe->addr;
  532                  __entry->len = sqe->len;
       [all …]

/openbmc/linux/drivers/scsi/qedf/

drv_fcoe_fw_funcs.c
   13          memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
   14          SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
   16          task_params->sqe->task_id = task_params->itid;
  167          task_params->sqe->additional_info_union.burst_length =
  169          SET_FIELD(task_params->sqe->flags,
  171          SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
  193          task_params->sqe->additional_info_union.seq_rec_updated_offset =