
Searched refs:rq (Results 151 – 175 of 662) sorted by relevance


/openbmc/linux/drivers/platform/chrome/wilco_ec/
telemetry.c
158 if (rq->reserved) in check_telem_request()
161 switch (rq->command) { in check_telem_request()
163 max_size += sizeof(rq->args.get_log); in check_telem_request()
166 max_size += sizeof(rq->args.get_version); in check_telem_request()
169 max_size += sizeof(rq->args.get_fan_info); in check_telem_request()
172 max_size += sizeof(rq->args.get_diag_info); in check_telem_request()
175 max_size += sizeof(rq->args.get_temp_info); in check_telem_request()
178 max_size += sizeof(rq->args.get_temp_read); in check_telem_request()
181 max_size += sizeof(rq->args.get_batt_ext_info); in check_telem_request()
184 if (rq->args.get_batt_ppid_info.always1 != 1) in check_telem_request()
[all …]
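
The hits above show check_telem_request() computing a per-command maximum request size: it rejects requests with reserved bits set, then switches on rq->command and adds the size of the matching argument union member. A minimal standalone sketch of that validation pattern follows; the command IDs, struct layout, and sizes here are hypothetical, not the wilco_ec ABI.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical request layout modeled on the pattern above. */
struct telem_rq {
	uint8_t command;
	uint8_t reserved;
	union {
		struct { uint32_t index; } get_log;
		struct { uint16_t fan_id; } get_fan_info;
	} args;
};

/* Return the largest valid size for this command, or -1 to reject. */
static int telem_rq_max_size(const struct telem_rq *rq)
{
	size_t max_size = offsetof(struct telem_rq, args);

	if (rq->reserved)
		return -1;
	switch (rq->command) {
	case 1: max_size += sizeof(rq->args.get_log); break;
	case 2: max_size += sizeof(rq->args.get_fan_info); break;
	default: return -1;
	}
	return (int)max_size;
}

int main(void)
{
	struct telem_rq rq = { .command = 1 };

	return telem_rq_max_size(&rq) < 0;
}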
/openbmc/linux/drivers/gpu/drm/i915/gt/
mock_engine.c
256 static void mock_add_to_engine(struct i915_request *rq) in mock_add_to_engine() argument
258 lockdep_assert_held(&rq->engine->sched_engine->lock); in mock_add_to_engine()
259 list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests); in mock_add_to_engine()
273 locked = READ_ONCE(rq->engine); in mock_remove_from_engine()
280 list_del_init(&rq->sched.link); in mock_remove_from_engine()
297 struct i915_request *rq; in mock_reset_cancel() local
306 i915_request_put(i915_request_mark_eio(rq)); in mock_reset_cancel()
310 list_for_each_entry(rq, &mock->hw_queue, mock.link) { in mock_reset_cancel()
311 if (i915_request_mark_eio(rq)) { in mock_reset_cancel()
312 __i915_request_submit(rq); in mock_reset_cancel()
[all …]
intel_gt_requests.c
19 struct i915_request *rq, *rn; in retire_requests() local
21 list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
22 if (!i915_request_retire(rq)) in retire_requests()
243 struct i915_request *rq, *rn; in intel_gt_watchdog_work() local
250 llist_for_each_entry_safe(rq, rn, first, watchdog.link) { in intel_gt_watchdog_work()
251 if (!i915_request_completed(rq)) { in intel_gt_watchdog_work()
252 struct dma_fence *f = &rq->fence; in intel_gt_watchdog_work()
258 i915_request_cancel(rq, -EINTR); in intel_gt_watchdog_work()
260 i915_request_put(rq); in intel_gt_watchdog_work()
intel_ring.c
230 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) in intel_ring_begin() argument
232 struct intel_ring *ring = rq->ring; in intel_ring_begin()
242 total_bytes = bytes + rq->reserved_space; in intel_ring_begin()
263 total_bytes = rq->reserved_space + remain_actual; in intel_ring_begin()
279 GEM_BUG_ON(!rq->reserved_space); in intel_ring_begin()
282 i915_request_timeline(rq), in intel_ring_begin()
312 int intel_ring_cacheline_align(struct i915_request *rq) in intel_ring_cacheline_align() argument
317 num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); in intel_ring_cacheline_align()
324 cs = intel_ring_begin(rq, num_dwords); in intel_ring_cacheline_align()
329 intel_ring_advance(rq, cs + num_dwords); in intel_ring_cacheline_align()
[all …]
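
intel_ring_begin() reserves room for num_dwords 32-bit commands in the request's circular ring (keeping rq->reserved_space back for the final breadcrumb), and intel_ring_cacheline_align() pads with no-ops until the emit offset is cacheline aligned. A standalone model of the alignment arithmetic, assuming a 64-byte cacheline as CACHELINE_BYTES is on these platforms:

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64u

/* Number of u32 no-ops needed to pad `emit` (a byte offset into the
 * ring) up to the next cacheline boundary; 0 if already aligned. */
static unsigned int cacheline_pad_dwords(uint32_t emit)
{
	unsigned int partial = emit & (CACHELINE_BYTES - 1);

	if (!partial)
		return 0;
	return (CACHELINE_BYTES - partial) / sizeof(uint32_t);
}

int main(void)
{
	printf("%u\n", cacheline_pad_dwords(0x48)); /* 0x48 % 64 = 8 bytes -> 14 dwords */
	return 0;
}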
selftest_tlb.c
44 struct i915_request *rq; in pte_tlbinv() local
124 rq = i915_request_create(ce); in pte_tlbinv()
125 if (IS_ERR(rq)) { in pte_tlbinv()
126 err = PTR_ERR(rq); in pte_tlbinv()
130 err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0); in pte_tlbinv()
132 i915_request_add(rq); in pte_tlbinv()
136 i915_request_get(rq); in pte_tlbinv()
137 i915_request_add(rq); in pte_tlbinv()
142 if (!i915_request_completed(rq)) { in pte_tlbinv()
149 } else if (!i915_request_completed(rq)) { in pte_tlbinv()
[all …]
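
These hits also show the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom: i915_request_create() returns either a valid request or an errno encoded in the pointer itself, so no separate status out-parameter is needed. A userspace model of that encoding (create_request() is a made-up stand-in):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Error codes live in the last page of the address space, so any
 * pointer value >= -4095 (as unsigned) is an encoded -errno. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_request(int fail)
{
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
	void *rq = create_request(1);

	if (IS_ERR(rq))
		printf("create failed: %ld\n", PTR_ERR(rq));
	return 0;
}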
selftest_ring_submission.c
72 struct i915_request *rq; in context_sync() local
75 rq = intel_context_create_request(ce); in context_sync()
76 if (IS_ERR(rq)) in context_sync()
77 return PTR_ERR(rq); in context_sync()
79 i915_request_get(rq); in context_sync()
80 i915_request_add(rq); in context_sync()
82 if (i915_request_wait(rq, 0, HZ / 5) < 0) in context_sync()
84 i915_request_put(rq); in context_sync()
intel_gt.c
529 struct i915_request *rq; in __engines_record_defaults() local
545 if (IS_ERR(rq)) { in __engines_record_defaults()
546 err = PTR_ERR(rq); in __engines_record_defaults()
560 i915_request_add(rq); in __engines_record_defaults()
580 rq = requests[id]; in __engines_record_defaults()
581 if (!rq) in __engines_record_defaults()
584 if (rq->fence.error) { in __engines_record_defaults()
615 rq = requests[id]; in __engines_record_defaults()
616 if (!rq) in __engines_record_defaults()
619 ce = rq->context; in __engines_record_defaults()
[all …]
/openbmc/linux/drivers/scsi/
sr.c
324 if (rq->bio != NULL) in sr_done()
369 cd = scsi_cd(rq->q->disk); in sr_init_command()
396 switch (req_op(rq)) { in sr_init_command()
930 struct request *rq; in sr_read_cdda_bpc() local
935 if (IS_ERR(rq)) in sr_read_cdda_bpc()
936 return PTR_ERR(rq); in sr_read_cdda_bpc()
937 scmd = blk_mq_rq_to_pdu(rq); in sr_read_cdda_bpc()
954 rq->timeout = 60 * HZ; in sr_read_cdda_bpc()
955 bio = rq->bio; in sr_read_cdda_bpc()
957 blk_execute_rq(rq, false); in sr_read_cdda_bpc()
[all …]
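
sr_read_cdda_bpc() follows the usual blk-mq pattern for driver-issued requests: allocate a request, reach the driver's command PDU behind it with blk_mq_rq_to_pdu(), set a timeout, and run it synchronously with blk_execute_rq(). A compact userspace model of that allocate/configure/execute flow; the types and helpers here are stand-ins, not the blk-mq API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for struct request and its driver PDU,
 * which blk-mq allocates in one block behind the request. */
struct request { unsigned int timeout; unsigned char pdu[32]; };

static void *rq_to_pdu(struct request *rq) { return rq->pdu; }

static struct request *alloc_request(void)
{
	return calloc(1, sizeof(struct request));
}

static int execute_request(struct request *rq)
{
	/* A real driver would submit to hardware and wait here. */
	printf("executing, timeout=%us\n", rq->timeout);
	return 0;
}

int main(void)
{
	struct request *rq = alloc_request();
	unsigned char *cdb;

	if (!rq)
		return 1;
	cdb = rq_to_pdu(rq);   /* command bytes live behind the request */
	memset(cdb, 0, 16);
	cdb[0] = 0xbe;         /* READ CD, the command sr_read_cdda_bpc issues */
	rq->timeout = 60;      /* mirrors rq->timeout = 60 * HZ */
	execute_request(rq);
	free(rq);
	return 0;
}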
scsi_lib.c
998 struct request *rq) in scsi_cmd_needs_dma_drain() argument
1048 count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg); in scsi_alloc_sgtables()
1050 if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { in scsi_alloc_sgtables()
1052 (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in scsi_alloc_sgtables()
1086 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); in scsi_alloc_sgtables()
1095 count = blk_rq_map_integrity_sg(rq->q, rq->bio, in scsi_alloc_sgtables()
1134 struct request *rq; in scsi_alloc_request() local
1137 if (!IS_ERR(rq)) in scsi_alloc_request()
1138 scsi_initialize_rq(rq); in scsi_alloc_request()
1139 return rq; in scsi_alloc_request()
[all …]
/openbmc/linux/drivers/nvme/host/
nvme.h
569 return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag; in nvme_cid()
577 struct request *rq; in nvme_find_rq() local
579 rq = blk_mq_tag_to_rq(tags, tag); in nvme_find_rq()
580 if (unlikely(!rq)) { in nvme_find_rq()
586 dev_err(nvme_req(rq)->ctrl->device, in nvme_find_rq()
591 return rq; in nvme_find_rq()
714 struct nvme_ctrl *ctrl = rq->ctrl; in nvme_try_complete_req()
717 rq->genctr++; in nvme_try_complete_req()
720 rq->result = result; in nvme_try_complete_req()
1081 nvme_mpath_start_request(rq); in nvme_start_request()
[all …]
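
nvme_cid() packs a per-request generation counter together with the blk-mq tag into the 16-bit NVMe command identifier, and nvme_try_complete_req() bumps genctr on completion, so a stale or duplicated completion carrying an old CID can be caught in nvme_find_rq(). A model of that packing; the 12/4 bit split below is an assumption for illustration, not necessarily the driver's actual layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed split: low 12 bits = tag, high 4 bits = generation. */
#define CID_TAG_BITS 12u
#define CID_TAG_MASK ((1u << CID_TAG_BITS) - 1)

static uint16_t make_cid(uint8_t genctr, uint16_t tag)
{
	return (uint16_t)(genctr << CID_TAG_BITS) | (tag & CID_TAG_MASK);
}

/* A completion is stale if its generation no longer matches. */
static int cid_is_stale(uint16_t cid, uint8_t current_genctr)
{
	return (cid >> CID_TAG_BITS) != (current_genctr & 0xf);
}

int main(void)
{
	uint16_t cid = make_cid(3, 0x2a);

	printf("cid=0x%04x tag=0x%03x stale=%d\n",
	       cid, cid & CID_TAG_MASK, cid_is_stale(cid, 4));
	return 0;
}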
tcp.c
237 struct request *rq; in nvme_tcp_has_inline_data() local
549 struct request *rq; in nvme_tcp_process_nvme_cqe() local
552 if (!rq) { in nvme_tcp_process_nvme_cqe()
565 nvme_complete_rq(rq); in nvme_tcp_process_nvme_cqe()
574 struct request *rq; in nvme_tcp_handle_c2h_data() local
577 if (!rq) { in nvme_tcp_handle_c2h_data()
665 struct request *rq; in nvme_tcp_handle_r2t() local
670 if (!rq) { in nvme_tcp_handle_r2t()
772 struct request *rq = in nvme_tcp_recv_data() local
2331 struct request *rq) in nvme_tcp_setup_cmd_pdu() argument
[all …]
/openbmc/openbmc/poky/bitbake/lib/bb/
runqueue.py
147 self.rq = runqueue
171 if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure:
243 buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
255 for running in self.rq.runq_running.difference(self.rq.runq_complete):
463 self.rq = rq
1824 def __init__(self, rq): argument
1825 self.rq = rq
1826 self.cooker = rq.cooker
1828 self.rqdata = rq.rqdata
2171 self.rq.read_workers()
[all …]
/openbmc/linux/drivers/gpu/drm/i915/display/
intel_overlay.c
239 if (IS_ERR(rq)) in alloc_request()
240 return rq; in alloc_request()
248 return rq; in alloc_request()
261 if (IS_ERR(rq)) in intel_overlay_on()
262 return PTR_ERR(rq); in intel_overlay_on()
335 if (IS_ERR(rq)) in intel_overlay_continue()
336 return PTR_ERR(rq); in intel_overlay_continue()
413 if (IS_ERR(rq)) in intel_overlay_off()
414 return PTR_ERR(rq); in intel_overlay_off()
470 if (IS_ERR(rq)) in intel_overlay_release_old_vid()
[all …]
intel_display_rps.c
25 struct i915_request *rq = wait->request; in do_rps_boost() local
32 if (!i915_request_started(rq)) in do_rps_boost()
33 intel_rps_boost(rq); in do_rps_boost()
34 i915_request_put(rq); in do_rps_boost()
/openbmc/linux/drivers/gpu/drm/i915/
i915_active.h
89 struct i915_request *rq);
167 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);
181 int i915_request_await_active(struct i915_request *rq,
211 void i915_request_add_active_barriers(struct i915_request *rq);
220 static inline int __i915_request_await_exclusive(struct i915_request *rq, in __i915_request_await_exclusive() argument
228 err = i915_request_await_dma_fence(rq, fence); in __i915_request_await_exclusive()
/openbmc/linux/block/
blk-rq-qos.c
35 void __rq_qos_done(struct rq_qos *rqos, struct request *rq) in __rq_qos_done() argument
39 rqos->ops->done(rqos, rq); in __rq_qos_done()
44 void __rq_qos_issue(struct rq_qos *rqos, struct request *rq) in __rq_qos_issue() argument
48 rqos->ops->issue(rqos, rq); in __rq_qos_issue()
53 void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq) in __rq_qos_requeue() argument
57 rqos->ops->requeue(rqos, rq); in __rq_qos_requeue()
71 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_track() argument
75 rqos->ops->track(rqos, rq, bio); in __rq_qos_track()
80 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_merge() argument
84 rqos->ops->merge(rqos, rq, bio); in __rq_qos_merge()
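
The __rq_qos_* helpers all share one shape: walk the queue's chain of rq_qos policies and invoke a given hook on each policy that implements it, skipping policies that leave the hook NULL. A minimal standalone model of that optional-hook dispatch over a singly linked chain (the wbt policy here is just an example):

#include <stdio.h>
#include <stddef.h>

struct request;                 /* opaque in this sketch */

struct rq_qos;
struct rq_qos_ops {
	void (*done)(struct rq_qos *rqos, struct request *rq);
	void (*issue)(struct rq_qos *rqos, struct request *rq);
};
struct rq_qos {
	const struct rq_qos_ops *ops;
	struct rq_qos *next;    /* next policy in the chain */
};

/* Call ->done on every policy in the chain that provides it. */
static void rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	for (; rqos; rqos = rqos->next)
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
}

static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	(void)rqos; (void)rq;
	printf("wbt: request done\n");
}

int main(void)
{
	static const struct rq_qos_ops wbt_ops = { .done = wbt_done };
	struct rq_qos wbt = { .ops = &wbt_ops, .next = NULL };

	rq_qos_done(&wbt, NULL);
	return 0;
}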
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
ptp.c
266 struct mlx5e_rq *rq = &c->rq; in mlx5e_ptp_napi_poll() local
287 rq); in mlx5e_ptp_napi_poll()
307 mlx5e_cq_arm(&rq->cq); in mlx5e_ptp_napi_poll()
683 struct mlx5e_rq *rq) in mlx5e_init_ptp_rq() argument
690 rq->pdev = c->pdev; in mlx5e_init_ptp_rq()
692 rq->priv = priv; in mlx5e_init_ptp_rq()
695 rq->mdev = mdev; in mlx5e_init_ptp_rq()
697 rq->stats = &c->priv->ptp_stats.rq; in mlx5e_init_ptp_rq()
704 return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); in mlx5e_init_ptp_rq()
761 mlx5e_close_rq(&c->rq); in mlx5e_ptp_close_queues()
[all …]
/openbmc/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan; in qedr_ll2_complete_rx_packet()
117 qedr_inc_sw_gsi_cons(&qp->rq); in qedr_ll2_complete_rx_packet()
339 qp->rq.max_wr = attrs->cap.max_recv_wr; in qedr_create_gsi_qp()
651 memset(&qp->rqe_wr_id[qp->rq.prod], 0, in qedr_gsi_post_recv()
652 sizeof(qp->rqe_wr_id[qp->rq.prod])); in qedr_gsi_post_recv()
654 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id; in qedr_gsi_post_recv()
656 qedr_inc_sw_prod(&qp->rq); in qedr_gsi_post_recv()
681 while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) { in qedr_gsi_poll_cq()
696 vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan & in qedr_gsi_poll_cq()
705 qedr_inc_sw_cons(&qp->rq); in qedr_gsi_poll_cq()
[all …]
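
The GSI queue-pair code tracks its receive queue with software producer and consumer indices (qedr_inc_sw_prod/qedr_inc_sw_cons) and drains completions while rq.cons trails rq.gsi_cons. A standalone model of such wrap-around index arithmetic; it assumes a power-of-two ring so masking works, whereas the qedr helpers wrap with a modulo on max_wr:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u   /* assumed power of two */

struct sw_ring {
	uint32_t prod;  /* next slot to post into */
	uint32_t cons;  /* next slot to consume */
};

static uint32_t ring_idx(uint32_t v) { return v & (RING_SIZE - 1); }

static int ring_post(struct sw_ring *r, uint32_t *slot)
{
	if (r->prod - r->cons >= RING_SIZE)
		return -1;               /* full */
	*slot = ring_idx(r->prod++);     /* like qedr_inc_sw_prod */
	return 0;
}

static int ring_poll(struct sw_ring *r, uint32_t *slot)
{
	if (r->cons == r->prod)
		return -1;               /* empty */
	*slot = ring_idx(r->cons++);     /* like qedr_inc_sw_cons */
	return 0;
}

int main(void)
{
	struct sw_ring r = { 0, 0 };
	uint32_t slot;

	ring_post(&r, &slot);
	ring_poll(&r, &slot);
	printf("consumed slot %u\n", slot);
	return 0;
}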
/openbmc/linux/drivers/block/rnbd/
rnbd-proto.h
254 static inline u32 rq_to_rnbd_flags(struct request *rq) in rq_to_rnbd_flags() argument
258 switch (req_op(rq)) { in rq_to_rnbd_flags()
276 (__force u32)req_op(rq), in rq_to_rnbd_flags()
277 (__force unsigned long long)rq->cmd_flags); in rq_to_rnbd_flags()
281 if (op_is_sync(rq->cmd_flags)) in rq_to_rnbd_flags()
284 if (op_is_flush(rq->cmd_flags)) in rq_to_rnbd_flags()
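A sketch of the translation rq_to_rnbd_flags() performs: a switch on req_op(rq) picks the base wire opcode, unknown operations are logged and rejected, and sync/flush bits are OR'd in afterwards. The flag values below are made up for illustration; the real RNBD wire constants differ.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical wire flags, not the real RNBD constants. */
enum { OP_READ = 1, OP_WRITE = 2, OP_DISCARD = 3 };
#define F_SYNC  (1u << 8)
#define F_FUA   (1u << 9)

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

static uint32_t req_to_wire_flags(enum req_op op, int is_sync, int is_flush)
{
	uint32_t flags;

	switch (op) {
	case REQ_OP_READ:    flags = OP_READ;    break;
	case REQ_OP_WRITE:   flags = OP_WRITE;   break;
	case REQ_OP_DISCARD: flags = OP_DISCARD; break;
	default:
		fprintf(stderr, "unknown op %d\n", op);
		return 0;
	}
	if (is_sync)
		flags |= F_SYNC;
	if (is_flush)
		flags |= F_FUA;
	return flags;
}

int main(void)
{
	printf("0x%x\n", (unsigned)req_to_wire_flags(REQ_OP_WRITE, 1, 0));
	return 0;
}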
/openbmc/linux/include/trace/events/
sched.h
699 TP_PROTO(struct rq *rq),
700 TP_ARGS(rq));
703 TP_PROTO(struct rq *rq),
704 TP_ARGS(rq));
707 TP_PROTO(struct rq *rq),
708 TP_ARGS(rq));
711 TP_PROTO(struct rq *rq),
712 TP_ARGS(rq));
719 TP_PROTO(struct rq *rq),
720 TP_ARGS(rq));
[all …]
nbd.h
64 struct request *rq),
66 TP_ARGS(nbd_request, index, rq),
77 __entry->request = rq;
97 struct request *rq),
99 TP_ARGS(nbd_request, index, rq),
/openbmc/linux/drivers/block/
loop.c
297 __rq_for_each_bio(bio, rq) in lo_read_simple()
359 struct bio *bio = rq->bio; in lo_complete_rq()
368 blk_mq_end_request(rq, ret); in lo_complete_rq()
381 blk_mq_complete_request(rq); in lo_rw_aio_do_completion()
399 struct bio *bio = rq->bio; in lo_rw_aio()
409 if (rq->bio != rq->biotail) { in lo_rw_aio()
475 switch (req_op(rq)) { in do_req_filebacked()
1845 struct request *rq = bd->rq; in loop_queue_rq() local
1849 blk_mq_start_request(rq); in loop_queue_rq()
1854 switch (req_op(rq)) { in loop_queue_rq()
[all …]
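
lo_read_simple() iterates every bio of the request with __rq_for_each_bio(), and lo_rw_aio() checks rq->bio != rq->biotail to see whether the request spans more than one bio before building a vector per segment. A minimal model of walking a request's bio chain, with simplified stand-in structs rather than the real block-layer types:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins: a request points at a singly linked bio chain, with
 * biotail caching the last element (as in struct request). */
struct bio { size_t len; struct bio *bi_next; };
struct request { struct bio *bio, *biotail; };

#define rq_for_each_bio(b, rq) \
	for ((b) = (rq)->bio; (b); (b) = (b)->bi_next)

int main(void)
{
	struct bio b2 = { 2048, NULL }, b1 = { 4096, &b2 };
	struct request rq = { &b1, &b2 };
	struct bio *bio;
	size_t total = 0;

	if (rq.bio != rq.biotail)
		printf("multi-bio request\n");  /* lo_rw_aio's test */
	rq_for_each_bio(bio, &rq)
		total += bio->len;
	printf("total %zu bytes\n", total);
	return 0;
}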
/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/
i915_gem_migrate.c
193 struct i915_request *rq; in __igt_lmem_pages_migrate() local
224 0xdeadbeaf, &rq); in __igt_lmem_pages_migrate()
225 if (rq) { in __igt_lmem_pages_migrate()
228 dma_resv_add_fence(obj->base.resv, &rq->fence, in __igt_lmem_pages_migrate()
230 i915_request_put(rq); in __igt_lmem_pages_migrate()
394 struct i915_request *rq; in igt_async_migrate() local
411 rq = igt_spinner_create_request(&spin, ce, MI_NOOP); in igt_async_migrate()
413 if (IS_ERR(rq)) { in igt_async_migrate()
414 err = PTR_ERR(rq); in igt_async_migrate()
420 spin_fence = dma_fence_get(&rq->fence); in igt_async_migrate()
[all …]
/openbmc/linux/fs/erofs/
compress.h
26 int (*decompress)(struct z_erofs_decompress_req *rq,
92 int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
101 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
103 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
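
compress.h declares a per-algorithm decompressor interface: each backend (LZMA, DEFLATE, ...) supplies a decompress hook taking a z_erofs_decompress_req, and the caller dispatches through an ops table. A sketch of such an algorithm-indexed ops table; the names and layout are illustrative, not the erofs internals:

#include <stdio.h>

struct decompress_req { int alg; /* plus in/out buffers in reality */ };

struct decompressor {
	const char *name;
	int (*decompress)(struct decompress_req *rq);
};

static int lzma_decompress(struct decompress_req *rq)
{
	(void)rq;
	printf("lzma backend\n");
	return 0;
}

static int deflate_decompress(struct decompress_req *rq)
{
	(void)rq;
	printf("deflate backend\n");
	return 0;
}

/* Indexed by the on-disk algorithm id. */
static const struct decompressor decompressors[] = {
	{ "lzma",    lzma_decompress },
	{ "deflate", deflate_decompress },
};

int main(void)
{
	struct decompress_req rq = { .alg = 1 };

	return decompressors[rq.alg].decompress(&rq);
}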
/openbmc/linux/drivers/net/ethernet/intel/igb/
igb_ptp.c
503 switch (rq->type) { in igb_ptp_feature_enable_82580()
514 rq->extts.index); in igb_ptp_feature_enable_82580()
518 if (rq->extts.index == 1) { in igb_ptp_feature_enable_82580()
543 if (rq->perout.flags) in igb_ptp_feature_enable_82580()
548 rq->perout.index); in igb_ptp_feature_enable_82580()
581 int i = rq->perout.index; in igb_ptp_feature_enable_82580()
653 switch (rq->type) { in igb_ptp_feature_enable_i210()
670 rq->extts.index); in igb_ptp_feature_enable_i210()
699 if (rq->perout.flags) in igb_ptp_feature_enable_i210()
704 rq->perout.index); in igb_ptp_feature_enable_i210()
[all …]
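
Both igb enable callbacks switch on rq->type (external timestamp vs. periodic output), validate the channel index, and reject unsupported rq->perout.flags before touching hardware. A sketch of that validate-then-program shape, using a simplified stand-in for the kernel's ptp_clock_request:

#include <stdio.h>

/* Simplified stand-in for struct ptp_clock_request. */
enum ptp_req_type { PTP_REQ_EXTTS, PTP_REQ_PEROUT };

struct ptp_request {
	enum ptp_req_type type;
	unsigned int index;     /* channel number */
	unsigned int flags;     /* must be validated, not ignored */
};

#define N_EXTTS  2
#define N_PEROUT 2

static int ptp_feature_enable(const struct ptp_request *rq, int on)
{
	switch (rq->type) {
	case PTP_REQ_EXTTS:
		if (rq->index >= N_EXTTS)
			return -22;     /* -EINVAL */
		break;
	case PTP_REQ_PEROUT:
		if (rq->flags)          /* reject flags we don't support */
			return -95;     /* -EOPNOTSUPP */
		if (rq->index >= N_PEROUT)
			return -22;
		break;
	default:
		return -95;
	}
	printf("%s channel %u %s\n",
	       rq->type == PTP_REQ_EXTTS ? "extts" : "perout",
	       rq->index, on ? "on" : "off");
	return 0;
}

int main(void)
{
	struct ptp_request rq = { PTP_REQ_PEROUT, 0, 0 };

	return ptp_feature_enable(&rq, 1);
}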
