Lines Matching refs:ubq

74 	struct ublk_queue *ubq;  member
195 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
208 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq) in ublk_queue_is_zoned() argument
210 return ubq->flags & UBLK_F_ZONED; in ublk_queue_is_zoned()
404 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, in ublk_setup_iod_zoned() argument
407 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); in ublk_setup_iod_zoned()
408 struct ublk_io *io = &ubq->ios[req->tag]; in ublk_setup_iod_zoned()
479 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, in ublk_setup_iod_zoned() argument
628 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) in ublk_support_user_copy() argument
630 return ubq->flags & UBLK_F_USER_COPY; in ublk_support_user_copy()
633 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) in ublk_need_req_ref() argument
639 return ublk_support_user_copy(ubq); in ublk_need_req_ref()
642 static inline void ublk_init_req_ref(const struct ublk_queue *ubq, in ublk_init_req_ref() argument
645 if (ublk_need_req_ref(ubq)) { in ublk_init_req_ref()
652 static inline bool ublk_get_req_ref(const struct ublk_queue *ubq, in ublk_get_req_ref() argument
655 if (ublk_need_req_ref(ubq)) { in ublk_get_req_ref()
664 static inline void ublk_put_req_ref(const struct ublk_queue *ubq, in ublk_put_req_ref() argument
667 if (ublk_need_req_ref(ubq)) { in ublk_put_req_ref()
676 static inline bool ublk_need_get_data(const struct ublk_queue *ubq) in ublk_need_get_data() argument
678 return ubq->flags & UBLK_F_NEED_GET_DATA; in ublk_need_get_data()
704 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, in ublk_get_iod() argument
708 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]); in ublk_get_iod()
718 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_queue_cmd_buf_size() local
720 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc), in ublk_queue_cmd_buf_size()
725 struct ublk_queue *ubq) in ublk_queue_can_use_recovery_reissue() argument
727 return (ubq->flags & UBLK_F_USER_RECOVERY) && in ublk_queue_can_use_recovery_reissue()
728 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE); in ublk_queue_can_use_recovery_reissue()
732 struct ublk_queue *ubq) in ublk_queue_can_use_recovery() argument
734 return ubq->flags & UBLK_F_USER_RECOVERY; in ublk_queue_can_use_recovery()
914 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req, in ublk_map_io() argument
919 if (ublk_support_user_copy(ubq)) in ublk_map_io()
940 static int ublk_unmap_io(const struct ublk_queue *ubq, in ublk_unmap_io() argument
946 if (ublk_support_user_copy(ubq)) in ublk_unmap_io()
991 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req) in ublk_setup_iod() argument
993 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); in ublk_setup_iod()
994 struct ublk_io *io = &ubq->ios[req->tag]; in ublk_setup_iod()
998 if (!ublk_queue_is_zoned(ubq) && in ublk_setup_iod()
1019 if (ublk_queue_is_zoned(ubq)) in ublk_setup_iod()
1020 return ublk_setup_iod_zoned(ubq, req); in ublk_setup_iod()
1039 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq) in ubq_daemon_is_dying() argument
1041 return ubq->ubq_daemon->flags & PF_EXITING; in ubq_daemon_is_dying()
1047 struct ublk_queue *ubq = req->mq_hctx->driver_data; in __ublk_complete_rq() local
1048 struct ublk_io *io = &ubq->ios[req->tag]; in __ublk_complete_rq()
1078 unmapped_bytes = ublk_unmap_io(ubq, req, io); in __ublk_complete_rq()
1115 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, in __ublk_fail_req() argument
1122 if (ublk_queue_can_use_recovery_reissue(ubq)) in __ublk_fail_req()
1125 ublk_put_req_ref(ubq, req); in __ublk_fail_req()
1147 static inline void __ublk_abort_rq(struct ublk_queue *ubq, in __ublk_abort_rq() argument
1151 if (ublk_queue_can_use_recovery(ubq)) in __ublk_abort_rq()
1156 mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0); in __ublk_abort_rq()
1162 struct ublk_queue *ubq = req->mq_hctx->driver_data; in __ublk_rq_task_work() local
1164 struct ublk_io *io = &ubq->ios[tag]; in __ublk_rq_task_work()
1168 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, in __ublk_rq_task_work()
1169 ublk_get_iod(ubq, req->tag)->addr); in __ublk_rq_task_work()
1180 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) { in __ublk_rq_task_work()
1181 __ublk_abort_rq(ubq, req); in __ublk_rq_task_work()
1185 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) { in __ublk_rq_task_work()
1194 __func__, io->cmd->cmd_op, ubq->q_id, in __ublk_rq_task_work()
1206 ublk_get_iod(ubq, req->tag)->addr = io->addr; in __ublk_rq_task_work()
1208 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, in __ublk_rq_task_work()
1209 ublk_get_iod(ubq, req->tag)->addr); in __ublk_rq_task_work()
1212 mapped_bytes = ublk_map_io(ubq, req, io); in __ublk_rq_task_work()
1230 ublk_get_iod(ubq, req->tag)->nr_sectors = in __ublk_rq_task_work()
1234 ublk_init_req_ref(ubq, req); in __ublk_rq_task_work()
1238 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq, in ublk_forward_io_cmds() argument
1241 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); in ublk_forward_io_cmds()
1249 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq) in ublk_abort_io_cmds() argument
1251 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); in ublk_abort_io_cmds()
1255 __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data)); in ublk_abort_io_cmds()
1261 struct ublk_queue *ubq = pdu->ubq; in ublk_rq_task_work_cb() local
1263 ublk_forward_io_cmds(ubq, issue_flags); in ublk_rq_task_work_cb()
1266 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) in ublk_queue_cmd() argument
1271 if (!llist_add(&data->node, &ubq->io_cmds)) in ublk_queue_cmd()
1274 io = &ubq->ios[rq->tag]; in ublk_queue_cmd()
1288 ublk_abort_io_cmds(ubq); in ublk_queue_cmd()
1293 pdu->ubq = ubq; in ublk_queue_cmd()
1300 struct ublk_queue *ubq = rq->mq_hctx->driver_data; in ublk_timeout() local
1302 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) { in ublk_timeout()
1303 if (!ubq->timeout) { in ublk_timeout()
1304 send_sig(SIGKILL, ubq->ubq_daemon, 0); in ublk_timeout()
1305 ubq->timeout = true; in ublk_timeout()
1317 struct ublk_queue *ubq = hctx->driver_data; in ublk_queue_rq() local
1322 res = ublk_setup_iod(ubq, rq); in ublk_queue_rq()
1335 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort)) in ublk_queue_rq()
1340 if (unlikely(ubq_daemon_is_dying(ubq))) { in ublk_queue_rq()
1341 __ublk_abort_rq(ubq, rq); in ublk_queue_rq()
1345 ublk_queue_cmd(ubq, rq); in ublk_queue_rq()
1354 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num); in ublk_init_hctx() local
1356 hctx->driver_data = ubq; in ublk_init_hctx()
1427 struct ublk_queue *ubq = ublk_get_queue(ub, qid); in ublk_commit_completion() local
1428 struct ublk_io *io = &ubq->ios[tag]; in ublk_commit_completion()
1444 ublk_put_req_ref(ubq, req); in ublk_commit_completion()
1452 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_abort_queue() argument
1459 for (i = 0; i < ubq->q_depth; i++) { in ublk_abort_queue()
1460 struct ublk_io *io = &ubq->ios[i]; in ublk_abort_queue()
1469 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i); in ublk_abort_queue()
1471 __ublk_fail_req(ubq, io, rq); in ublk_abort_queue()
1484 struct ublk_queue *ubq = ublk_get_queue(ub, i); in ublk_daemon_monitor_work() local
1486 if (ubq_daemon_is_dying(ubq)) { in ublk_daemon_monitor_work()
1487 if (ublk_queue_can_use_recovery(ubq)) in ublk_daemon_monitor_work()
1493 ublk_abort_queue(ub, ubq); in ublk_daemon_monitor_work()
1509 static inline bool ublk_queue_ready(struct ublk_queue *ubq) in ublk_queue_ready() argument
1511 return ubq->nr_io_ready == ubq->q_depth; in ublk_queue_ready()
1514 static void ublk_cancel_queue(struct ublk_queue *ubq) in ublk_cancel_queue() argument
1518 for (i = 0; i < ubq->q_depth; i++) { in ublk_cancel_queue()
1519 struct ublk_io *io = &ubq->ios[i]; in ublk_cancel_queue()
1524 spin_lock(&ubq->cancel_lock); in ublk_cancel_queue()
1528 spin_unlock(&ubq->cancel_lock); in ublk_cancel_queue()
1650 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_mark_io_ready() argument
1653 ubq->nr_io_ready++; in ublk_mark_io_ready()
1654 if (ublk_queue_ready(ubq)) { in ublk_mark_io_ready()
1655 ubq->ubq_daemon = current; in ublk_mark_io_ready()
1656 get_task_struct(ubq->ubq_daemon); in ublk_mark_io_ready()
1670 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_handle_need_get_data() local
1673 ublk_queue_cmd(ubq, req); in ublk_handle_need_get_data()
1702 struct ublk_queue *ubq; in __ublk_ch_uring_cmd() local
1716 ubq = ublk_get_queue(ub, ub_cmd->q_id); in __ublk_ch_uring_cmd()
1717 if (!ubq || ub_cmd->q_id != ubq->q_id) in __ublk_ch_uring_cmd()
1720 if (ubq->ubq_daemon && ubq->ubq_daemon != current) in __ublk_ch_uring_cmd()
1723 if (tag >= ubq->q_depth) in __ublk_ch_uring_cmd()
1726 io = &ubq->ios[tag]; in __ublk_ch_uring_cmd()
1750 if (ublk_queue_ready(ubq)) { in __ublk_ch_uring_cmd()
1761 if (!ublk_support_user_copy(ubq)) { in __ublk_ch_uring_cmd()
1766 if (!ub_cmd->addr && !ublk_need_get_data(ubq)) in __ublk_ch_uring_cmd()
1775 ublk_mark_io_ready(ub, ubq); in __ublk_ch_uring_cmd()
1783 if (!ublk_support_user_copy(ubq)) { in __ublk_ch_uring_cmd()
1788 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || in __ublk_ch_uring_cmd()
1822 struct ublk_queue *ubq, int tag, size_t offset) in __ublk_check_and_get_req() argument
1826 if (!ublk_need_req_ref(ubq)) in __ublk_check_and_get_req()
1829 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); in __ublk_check_and_get_req()
1833 if (!ublk_get_req_ref(ubq, req)) in __ublk_check_and_get_req()
1847 ublk_put_req_ref(ubq, req); in __ublk_check_and_get_req()
1889 struct ublk_queue *ubq; in ublk_check_and_get_req() local
1910 ubq = ublk_get_queue(ub, q_id); in ublk_check_and_get_req()
1911 if (!ubq) in ublk_check_and_get_req()
1914 if (tag >= ubq->q_depth) in ublk_check_and_get_req()
1917 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off); in ublk_check_and_get_req()
1930 ublk_put_req_ref(ubq, req); in ublk_check_and_get_req()
1936 struct ublk_queue *ubq; in ublk_ch_read_iter() local
1946 ubq = req->mq_hctx->driver_data; in ublk_ch_read_iter()
1947 ublk_put_req_ref(ubq, req); in ublk_ch_read_iter()
1954 struct ublk_queue *ubq; in ublk_ch_write_iter() local
1964 ubq = req->mq_hctx->driver_data; in ublk_ch_write_iter()
1965 ublk_put_req_ref(ubq, req); in ublk_ch_write_iter()
1984 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_deinit_queue() local
1986 if (ubq->ubq_daemon) in ublk_deinit_queue()
1987 put_task_struct(ubq->ubq_daemon); in ublk_deinit_queue()
1988 if (ubq->io_cmd_buf) in ublk_deinit_queue()
1989 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size)); in ublk_deinit_queue()
1994 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); in ublk_init_queue() local
1999 spin_lock_init(&ubq->cancel_lock); in ublk_init_queue()
2000 ubq->flags = ub->dev_info.flags; in ublk_init_queue()
2001 ubq->q_id = q_id; in ublk_init_queue()
2002 ubq->q_depth = ub->dev_info.queue_depth; in ublk_init_queue()
2009 ubq->io_cmd_buf = ptr; in ublk_init_queue()
2010 ubq->dev = ub; in ublk_init_queue()
2602 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) in ublk_queue_reinit() argument
2606 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq))); in ublk_queue_reinit()
2609 ubq->nr_io_ready = 0; in ublk_queue_reinit()
2611 put_task_struct(ubq->ubq_daemon); in ublk_queue_reinit()
2613 ubq->ubq_daemon = NULL; in ublk_queue_reinit()
2614 ubq->timeout = false; in ublk_queue_reinit()
2616 for (i = 0; i < ubq->q_depth; i++) { in ublk_queue_reinit()
2617 struct ublk_io *io = &ubq->ios[i]; in ublk_queue_reinit()
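
Most of the references above fall into two recurring patterns: small inline helpers that test bits in ubq->flags (ublk_queue_is_zoned(), ublk_support_user_copy(), ublk_need_get_data(), the recovery checks), and per-tag lookups that index ubq->io_cmd_buf as a flat array of ublksrv_io_desc entries (ublk_get_iod(), ublk_queue_cmd_buf_size()). The userspace sketch below mirrors those two patterns only; the struct layouts, field names, and flag bit values here are simplified stand-ins chosen for illustration, not the driver's actual definitions (the real flags and descriptor layout live in include/uapi/linux/ublk_cmd.h).

/* Minimal userspace sketch of the ubq helper patterns seen in the listing.
 * Everything below is a simplified assumption, not the kernel's code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits; real values are defined in include/uapi/linux/ublk_cmd.h. */
#define UBLK_F_NEED_GET_DATA	(1u << 2)
#define UBLK_F_USER_COPY	(1u << 6)
#define UBLK_F_ZONED		(1u << 7)

struct ublksrv_io_desc {	/* simplified per-tag I/O descriptor */
	uint32_t op_flags;
	uint32_t nr_sectors;
	uint64_t start_sector;
	uint64_t addr;
};

struct ublk_queue {		/* simplified stand-in for the driver's struct ublk_queue */
	int q_id;
	int q_depth;
	unsigned long flags;
	char *io_cmd_buf;	/* q_depth descriptors laid out back to back */
};

/* Flag-test helpers follow the same shape as ublk_queue_is_zoned() and
 * ublk_support_user_copy() in the references above. */
static inline bool ublk_queue_is_zoned(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_ZONED;
}

static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_USER_COPY;
}

/* Per-tag descriptor lookup: io_cmd_buf is indexed by request tag,
 * mirroring the &ubq->io_cmd_buf[tag * sizeof(...)] pattern of ublk_get_iod(). */
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, int tag)
{
	return (struct ublksrv_io_desc *)
		&ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)];
}

int main(void)
{
	static struct ublksrv_io_desc descs[4];
	struct ublk_queue q = {
		.q_id = 0,
		.q_depth = 4,
		.flags = UBLK_F_USER_COPY,
		.io_cmd_buf = (char *)descs,
	};

	ublk_get_iod(&q, 2)->nr_sectors = 8;
	printf("zoned=%d user_copy=%d tag2 sectors=%u\n",
	       ublk_queue_is_zoned(&q), ublk_support_user_copy(&q),
	       ublk_get_iod(&q, 2)->nr_sectors);
	return 0;
}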