
Searched refs:hctx (Results 1 – 25 of 59) sorted by relevance

/openbmc/linux/block/
blk-mq-sched.c
22 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_mark_restart_hctx() argument
24 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_mark_restart_hctx()
27 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in blk_mq_sched_mark_restart_hctx()
31 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in __blk_mq_sched_restart() argument
33 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); in __blk_mq_sched_restart()
44 blk_mq_run_hw_queue(hctx, true); in __blk_mq_sched_restart()
58 struct blk_mq_hw_ctx *hctx = in blk_mq_dispatch_hctx_list() local
65 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
74 return blk_mq_dispatch_rq_list(hctx, &hctx_list, count); in blk_mq_dispatch_hctx_list()
87 static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) in __blk_mq_do_dispatch_sched() argument
[all …]
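The lines above are the scheduler-restart handshake: when dispatch backs off, the hardware context is marked with BLK_MQ_S_SCHED_RESTART, and a later completion clears the bit and re-runs the queue. A minimal sketch of that pattern, using only the helpers visible in the excerpt (the my_* wrappers are hypothetical names, not kernel functions):

#include <linux/blk-mq.h>

/* Hedged sketch of the restart handshake shown above. */
static void my_mark_restart(struct blk_mq_hw_ctx *hctx)
{
	/* Only set the bit when it is not already set, as
	 * blk_mq_sched_mark_restart_hctx() does above. */
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static void my_restart(struct blk_mq_hw_ctx *hctx)
{
	/* Clear the bit, then kick the queue asynchronously,
	 * mirroring __blk_mq_sched_restart() above. */
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
}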
blk-mq-sysfs.c
34 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx, in blk_mq_hw_sysfs_release() local
37 blk_free_flush_queue(hctx->fq); in blk_mq_hw_sysfs_release()
38 sbitmap_free(&hctx->ctx_map); in blk_mq_hw_sysfs_release()
39 free_cpumask_var(hctx->cpumask); in blk_mq_hw_sysfs_release()
40 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
41 kfree(hctx); in blk_mq_hw_sysfs_release()
53 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
58 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
59 q = hctx->queue; in blk_mq_hw_sysfs_show()
65 res = entry->show(hctx, page); in blk_mq_hw_sysfs_show()
[all …]
blk-mq.h
48 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
50 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
51 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
121 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
160 struct blk_mq_hw_ctx *hctx; member
176 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
189 struct blk_mq_hw_ctx *hctx) in bt_wait_ptr() argument
191 if (!hctx) in bt_wait_ptr()
193 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
199 static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
[all …]
blk-mq.c
51 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
60 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
62 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
63 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
64 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
70 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
73 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
75 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
76 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
[all …]
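The blk-mq.c hits show how pending work is tracked per hardware context: each software context owns one bit in hctx->ctx_map, and the hctx counts as busy if any bit is set, its dispatch list is non-empty, or the elevator reports work. A hedged sketch of that bookkeeping (my_* names are hypothetical, and struct blk_mq_ctx is block-layer internal):

/* Sketch only: mirrors blk_mq_hctx_mark_pending()/blk_mq_hctx_has_pending(). */
static void my_mark_ctx_pending(struct blk_mq_hw_ctx *hctx,
				struct blk_mq_ctx *ctx)
{
	/* Each software context has a fixed bit per hctx type. */
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static bool my_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
	       sbitmap_any_bit_set(&hctx->ctx_map);
}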
blk-mq-debugfs.c
178 struct blk_mq_hw_ctx *hctx = data; in hctx_state_show() local
180 blk_flags_show(m, hctx->state, hctx_state_name, in hctx_state_show()
206 struct blk_mq_hw_ctx *hctx = data; in hctx_flags_show() local
207 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); in hctx_flags_show()
217 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), in hctx_flags_show()
314 __acquires(&hctx->lock) in hctx_dispatch_start()
316 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_start() local
318 spin_lock(&hctx->lock); in hctx_dispatch_start()
319 return seq_list_start(&hctx->dispatch, *pos); in hctx_dispatch_start()
324 struct blk_mq_hw_ctx *hctx = m->private; in hctx_dispatch_next() local
[all …]
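hctx_dispatch_start() above walks hctx->dispatch under hctx->lock via seq_file's start/next/stop callbacks. A hedged sketch of that iterator triple (the stop callback is not in the excerpt and is assumed to be the lock-releasing counterpart; my_* names are hypothetical):

#include <linux/blk-mq.h>
#include <linux/seq_file.h>

static void *my_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *my_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void my_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}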
blk-mq-tag.c
38 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
42 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_busy()
48 if (blk_mq_is_shared_tags(hctx->flags)) { in __blk_mq_tag_busy()
49 struct request_queue *q = hctx->queue; in __blk_mq_tag_busy()
55 if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) || in __blk_mq_tag_busy()
56 test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
81 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
83 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
86 if (blk_mq_is_shared_tags(hctx->flags)) { in __blk_mq_tag_idle()
87 struct request_queue *q = hctx->queue; in __blk_mq_tag_idle()
[all …]
blk-mq-sched.h
16 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
17 void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
19 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
25 static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_restart() argument
27 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) in blk_mq_sched_restart()
28 __blk_mq_sched_restart(hctx); in blk_mq_sched_restart()
70 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_has_work() argument
72 struct elevator_queue *e = hctx->queue->elevator; in blk_mq_sched_has_work()
75 return e->type->ops.has_work(hctx); in blk_mq_sched_has_work()
80 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) in blk_mq_sched_needs_restart() argument
[all …]
kyber-iosched.c
453 static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) in kyber_depth_updated() argument
455 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; in kyber_depth_updated()
456 struct blk_mq_tags *tags = hctx->sched_tags; in kyber_depth_updated()
464 static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in kyber_init_hctx() argument
469 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
473 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
475 GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
479 for (i = 0; i < hctx->nr_ctx; i++) in kyber_init_hctx()
483 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
484 ilog2(8), GFP_KERNEL, hctx->numa_node, in kyber_init_hctx()
[all …]
blk-mq-debugfs.h
25 struct blk_mq_hw_ctx *hctx);
26 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
33 struct blk_mq_hw_ctx *hctx);
34 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
44 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_hctx() argument
48 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_hctx() argument
69 struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_register_sched_hctx() argument
73 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) in blk_mq_debugfs_unregister_sched_hctx() argument
mq-deadline.c
596 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) in dd_dispatch_request() argument
598 struct deadline_data *dd = hctx->queue->elevator->elevator_data; in dd_dispatch_request()
630 static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth) in dd_to_word_depth() argument
632 struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags; in dd_to_word_depth()
633 const unsigned int nrr = hctx->queue->nr_requests; in dd_to_word_depth()
654 data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth); in dd_limit_depth()
658 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) in dd_depth_updated() argument
660 struct request_queue *q = hctx->queue; in dd_depth_updated()
662 struct blk_mq_tags *tags = hctx->sched_tags; in dd_depth_updated()
670 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in dd_init_hctx() argument
[all …]
blk-flush.c
363 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io() local
387 blk_mq_sched_restart(hctx); in mq_flush_data_end_io()
542 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_set_fq_lock_class() argument
545 lockdep_set_class(&hctx->fq->mq_flush_lock, key); in blk_mq_hctx_set_fq_lock_class()
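blk_mq_hctx_set_fq_lock_class() lets a driver give each hardware context's flush-queue lock its own lockdep class; the nvme-loop entry below calls it from init_hctx. A hedged sketch of that usage (my_* names and the static key are illustrative):

#include <linux/blk-mq.h>

static struct lock_class_key my_hctx_fq_lock_key;

static int my_flush_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			      unsigned int hctx_idx)
{
	/* Dedicated lockdep class for this hctx's mq_flush_lock, so stacked
	 * block drivers do not produce false-positive lockdep reports. */
	blk_mq_hctx_set_fq_lock_class(hctx, &my_hctx_fq_lock_key);
	hctx->driver_data = data;
	return 0;
}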
/openbmc/linux/samples/hid/
hid_mouse.bpf.c
9 int BPF_PROG(hid_y_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
12 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
17 bpf_printk("event: size: %d", hctx->size); in BPF_PROG()
55 int BPF_PROG(hid_x_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
58 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
73 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
75 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in BPF_PROG()
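The HID-BPF samples above attach BPF_PROG handlers to HID events and read the report buffer through hid_bpf_get_data(). A hedged sketch of such a handler, assuming the sample headers (vmlinux.h, bpf_tracing.h, hid_bpf_helpers.h) and a 9-byte mouse report whose byte 3 carries the X movement; the program and report layout are illustrative only:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "hid_bpf_helpers.h"

SEC("fmod_ret/hid_bpf_device_event")
int BPF_PROG(my_hid_event, struct hid_bpf_ctx *hctx)
{
	__u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */);

	if (!data)
		return 0; /* buffer too small, leave the event untouched */

	bpf_printk("event: size: %d x: %d", hctx->size, (__s8)data[3]);

	/* Invert the X axis as a trivial in-place transformation. */
	data[3] = -(__s8)data[3];

	return 0;
}

char _license[] SEC("license") = "GPL";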
hid_surface_dial.bpf.c
14 int BPF_PROG(hid_event, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
16 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 9 /* size */); in BPF_PROG()
105 int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
107 __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */); in BPF_PROG()
/openbmc/linux/net/dccp/ccids/
ccid3.h
104 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
105 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
106 return hctx; in ccid3_hc_tx_sk()
/openbmc/linux/include/linux/
blk-mq.h
883 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
884 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
887 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
894 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
895 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
952 #define queue_for_each_hw_ctx(q, hctx, i) \ argument
953 xa_for_each(&(q)->hctx_table, (i), (hctx))
955 #define hctx_for_each_ctx(hctx, ctx, i) \ argument
956 for ((i) = 0; (i) < (hctx)->nr_ctx && \
957 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
[all …]
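These are the driver-facing helpers: hardware queues can be stopped, started and (re)run per hctx, and queue_for_each_hw_ctx()/hctx_for_each_ctx() iterate the hctx xarray and the software contexts mapped to one hctx. A small sketch of the iteration (my_run_all_hw_queues() is a hypothetical helper; blk_mq_run_hw_queues() already exists for exactly this, the loop is only spelled out to show the macro):

#include <linux/blk-mq.h>

static void my_run_all_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* Kick every hardware context of the queue asynchronously. */
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_run_hw_queue(hctx, true);
}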
/openbmc/linux/drivers/hid/bpf/entrypoints/
entrypoints.bpf.c
18 int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx) in BPF_PROG() argument
20 bpf_tail_call(ctx, &hid_jmp_table, hctx->index); in BPF_PROG()
/openbmc/linux/drivers/s390/block/
scm_blk.c
283 static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx, in scm_blk_request() argument
286 struct scm_device *scmdev = hctx->queue->queuedata; in scm_blk_request()
288 struct scm_queue *sq = hctx->driver_data; in scm_blk_request()
332 static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scm_blk_init_hctx() argument
341 hctx->driver_data = qd; in scm_blk_init_hctx()
346 static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) in scm_blk_exit_hctx() argument
348 struct scm_queue *qd = hctx->driver_data; in scm_blk_exit_hctx()
351 kfree(hctx->driver_data); in scm_blk_exit_hctx()
352 hctx->driver_data = NULL; in scm_blk_exit_hctx()
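scm_blk shows the usual per-hctx driver_data lifecycle: allocate per-queue state in init_hctx and free it in exit_hctx. A hedged sketch of the same pattern (my_* names and struct my_queue are hypothetical):

#include <linux/blk-mq.h>
#include <linux/slab.h>

struct my_queue {
	int busy;		/* stand-in for per-hardware-queue state */
};

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int hctx_idx)
{
	struct my_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;
	hctx->driver_data = qd;
	return 0;
}

static void my_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}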
/openbmc/linux/drivers/block/rnbd/
rnbd-clt.c
139 if (WARN_ON(!q->hctx)) in rnbd_clt_dev_requeue()
143 blk_mq_run_hw_queue(q->hctx, true); in rnbd_clt_dev_requeue()
1096 struct blk_mq_hw_ctx *hctx, in rnbd_clt_dev_kick_mq_queue() argument
1099 struct rnbd_queue *q = hctx->driver_data; in rnbd_clt_dev_kick_mq_queue()
1102 blk_mq_delay_run_hw_queue(hctx, delay); in rnbd_clt_dev_kick_mq_queue()
1108 blk_mq_delay_run_hw_queue(hctx, 10/*ms*/); in rnbd_clt_dev_kick_mq_queue()
1111 static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, in rnbd_queue_rq() argument
1126 rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY); in rnbd_queue_rq()
1140 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); in rnbd_queue_rq()
1150 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); in rnbd_queue_rq()
[all …]
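rnbd's queue path shows the standard back-off idiom: when no resource is available, schedule the hardware queue to run again after a delay and return BLK_STS_RESOURCE so the core requeues the request. A hedged sketch of that idiom (my_* names, the resource check and the 10 ms delay are illustrative):

#include <linux/blk-mq.h>

static blk_status_t my_queue_rq_busy_path(struct blk_mq_hw_ctx *hctx,
					  const struct blk_mq_queue_data *bd)
{
	bool got_resource = false;	/* assume the allocation attempt failed */

	if (!got_resource) {
		/* Re-run this hctx later and let blk-mq retry the request. */
		blk_mq_delay_run_hw_queue(hctx, 10 /* ms */);
		return BLK_STS_RESOURCE;
	}

	blk_mq_start_request(bd->rq);
	/* ...issue bd->rq to the transport here... */
	return BLK_STS_OK;
}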
rnbd-clt.h
105 struct blk_mq_hw_ctx *hctx; member
/openbmc/linux/drivers/nvme/target/
loop.c
131 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_loop_queue_rq() argument
134 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_loop_queue_rq()
135 struct nvme_loop_queue *queue = hctx->driver_data; in nvme_loop_queue_rq()
218 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_hctx() argument
232 blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key); in nvme_loop_init_hctx()
234 hctx->driver_data = queue; in nvme_loop_init_hctx()
238 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_loop_init_admin_hctx() argument
246 hctx->driver_data = queue; in nvme_loop_init_admin_hctx()
/openbmc/linux/drivers/block/
virtio_blk.c
132 static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx) in get_virtio_blk_vq() argument
134 struct virtio_blk *vblk = hctx->queue->queuedata; in get_virtio_blk_vq()
135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in get_virtio_blk_vq()
214 static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req, in virtblk_map_data() argument
230 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); in virtblk_map_data()
380 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) in virtio_commit_rqs() argument
382 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs()
383 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
407 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx, in virtblk_prep_rq() argument
419 num = virtblk_map_data(hctx, req, vbr); in virtblk_prep_rq()
[all …]
/openbmc/linux/drivers/block/null_blk/
main.c
1657 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) in null_poll() argument
1659 struct nullb_queue *nq = hctx->driver_data; in null_poll()
1690 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in null_timeout_rq() local
1693 if (hctx->type == HCTX_TYPE_POLL) { in null_timeout_rq()
1694 struct nullb_queue *nq = hctx->driver_data; in null_timeout_rq()
1716 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL) in null_timeout_rq()
1721 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, in null_queue_rq() argument
1726 struct nullb_queue *nq = hctx->driver_data; in null_queue_rq()
1729 const bool is_poll = hctx->type == HCTX_TYPE_POLL; in null_queue_rq()
1731 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in null_queue_rq()
[all …]
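null_blk's hits show the core .queue_rq contract: per-queue driver state hangs off hctx->driver_data, sleeping is only allowed when the tag set was created with BLK_MQ_F_BLOCKING, and every started request must eventually be completed. A hedged skeleton of a queue_rq callback (my_* names are hypothetical, and the immediate completion stands in for real hardware handling):

#include <linux/blk-mq.h>

struct my_queue;			/* per-queue state set up in init_hctx */

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct my_queue *nq = hctx->driver_data;

	/* Legal only with BLK_MQ_F_BLOCKING, as the null_blk line above checks. */
	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	blk_mq_start_request(rq);

	/* ...hand rq to the hardware owned by nq; here it completes at once... */
	(void)nq;
	blk_mq_end_request(rq, BLK_STS_OK);

	return BLK_STS_OK;
}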
/openbmc/linux/drivers/nvme/host/
apple.c
732 static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, in apple_nvme_queue_rq() argument
735 struct nvme_ns *ns = hctx->queue->queuedata; in apple_nvme_queue_rq()
736 struct apple_nvme_queue *q = hctx->driver_data; in apple_nvme_queue_rq()
775 static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in apple_nvme_init_hctx() argument
778 hctx->driver_data = data; in apple_nvme_init_hctx()
936 static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx, in apple_nvme_poll() argument
939 struct apple_nvme_queue *q = hctx->driver_data; in apple_nvme_poll()
/openbmc/linux/drivers/scsi/
scsi_lib.c
1712 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) in scsi_queue_rq() argument
1854 static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) in scsi_mq_poll() argument
1856 struct Scsi_Host *shost = hctx->driver_data; in scsi_mq_poll()
1859 return shost->hostt->mq_poll(shost, hctx->queue_num); in scsi_mq_poll()
1864 static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in scsi_init_hctx() argument
1869 hctx->driver_data = shost; in scsi_init_hctx()
1940 static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) in scsi_commit_rqs() argument
1942 struct Scsi_Host *shost = hctx->driver_data; in scsi_commit_rqs()
1944 shost->hostt->commit_rqs(shost, hctx->queue_num); in scsi_commit_rqs()
/openbmc/linux/drivers/ufs/core/
ufs-mcq.c
114 struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx); in ufshcd_mcq_req_to_hwq() local
116 return hctx ? &hba->uhq[hctx->queue_num] : NULL; in ufshcd_mcq_req_to_hwq()
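The UFS MCQ helper resolves a request back to its hardware queue purely through the hctx: req->mq_hctx names the owning context and hctx->queue_num indexes the driver's own queue array. A hedged sketch of the same mapping (my_* types and names are hypothetical):

#include <linux/blk-mq.h>

struct my_hw_queue {
	int id;				/* stand-in for a driver-side queue */
};

struct my_host {
	struct my_hw_queue *queues;	/* indexed by hctx->queue_num */
};

static struct my_hw_queue *my_req_to_hwq(struct my_host *host,
					 struct request *req)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

	return hctx ? &host->queues[hctx->queue_num] : NULL;
}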
