Lines matching refs: workload
63 static void update_shadow_pdps(struct intel_vgpu_workload *workload) in update_shadow_pdps() argument
66 struct intel_context *ctx = workload->req->context; in update_shadow_pdps()
68 if (WARN_ON(!workload->shadow_mm)) in update_shadow_pdps()
71 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount))) in update_shadow_pdps()
76 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); in update_shadow_pdps()
84 static void sr_oa_regs(struct intel_vgpu_workload *workload, in sr_oa_regs() argument
87 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915; in sr_oa_regs()
101 if (workload->engine->id != RCS0) in sr_oa_regs()
105 workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; in sr_oa_regs()
107 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
110 workload->flex_mmio[i] = reg_state[state_offset + 1]; in sr_oa_regs()
115 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; in sr_oa_regs()
117 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { in sr_oa_regs()
122 reg_state[state_offset + 1] = workload->flex_mmio[i]; in sr_oa_regs()
127 static int populate_shadow_context(struct intel_vgpu_workload *workload) in populate_shadow_context() argument
129 struct intel_vgpu *vgpu = workload->vgpu; in populate_shadow_context()
131 struct intel_context *ctx = workload->req->context; in populate_shadow_context()
141 int ring_id = workload->engine->id; in populate_shadow_context()
151 sr_oa_regs(workload, (u32 *)shadow_ring_context, true); in populate_shadow_context()
153 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
156 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
165 if (workload->engine->id == RCS0) { in populate_shadow_context()
169 } else if (workload->engine->id == BCS0) in populate_shadow_context()
171 workload->ring_context_gpa + in populate_shadow_context()
182 workload->ring_context_gpa + in populate_shadow_context()
188 sr_oa_regs(workload, (u32 *)shadow_ring_context, false); in populate_shadow_context()
191 workload->engine->name, workload->ctx_desc.lrca, in populate_shadow_context()
192 workload->ctx_desc.context_id, in populate_shadow_context()
193 workload->ring_context_gpa); in populate_shadow_context()
203 workload->ctx_desc.lrca) && in populate_shadow_context()
205 workload->ring_context_gpa)) in populate_shadow_context()
208 s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca; in populate_shadow_context()
209 s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa; in populate_shadow_context()
215 context_page_num = workload->engine->context_size; in populate_shadow_context()
218 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0) in populate_shadow_context()
227 (u32)((workload->ctx_desc.lrca + i) << in populate_shadow_context()
253 ret = intel_gvt_scan_engine_context(workload); in populate_shadow_context()
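
The populate_shadow_context() matches above cover copying the guest's ring context out of guest physical memory (intel_gvt_read_gpa()) into the host shadow context, with the OA registers saved and restored around the copy by sr_oa_regs(). The compares at source lines 203-209 implement a "same context as last time" shortcut; a minimal sketch of that check, assuming a valid flag on s->last_ctx alongside the lrca and ring_context_gpa fields actually shown:

	/* Sketch only: skip re-copying the guest context pages when this ring
	 * saw the exact same context (LRCA + ring-context GPA) last time.
	 * The 'valid' bit is an assumption; only lrca and ring_context_gpa
	 * appear in the matches above. */
	bool skip_guest_copy =
		s->last_ctx[ring_id].valid &&
		s->last_ctx[ring_id].lrca == workload->ctx_desc.lrca &&
		s->last_ctx[ring_id].ring_context_gpa == workload->ring_context_gpa;

	/* remember this context so the next identical submission can skip */
	s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
	s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
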
294 struct intel_vgpu_workload *workload; in shadow_context_status_change() local
311 workload = scheduler->current_workload[ring_id]; in shadow_context_status_change()
312 if (unlikely(!workload)) in shadow_context_status_change()
318 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
321 workload->vgpu, rq->engine); in shadow_context_status_change()
322 scheduler->engine_owner[ring_id] = workload->vgpu; in shadow_context_status_change()
325 ring_id, workload->vgpu->id); in shadow_context_status_change()
327 atomic_set(&workload->shadow_ctx_active, 1); in shadow_context_status_change()
330 save_ring_hw_state(workload->vgpu, rq->engine); in shadow_context_status_change()
331 atomic_set(&workload->shadow_ctx_active, 0); in shadow_context_status_change()
334 save_ring_hw_state(workload->vgpu, rq->engine); in shadow_context_status_change()
340 wake_up(&workload->shadow_ctx_status_wq); in shadow_context_status_change()
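
shadow_context_status_change() is the request notifier that brackets a workload's time on hardware: it records which vGPU owns the physical engine, flips shadow_ctx_active, saves the ring HW state when the shadow context leaves the hardware, and finally wakes shadow_ctx_status_wq so completion can proceed. A condensed view, assuming the usual execlists status codes (the enum names are not part of the matches above):

	/* Assumed action names; simplification of source lines 311-340. */
	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		if (workload->vgpu != scheduler->engine_owner[ring_id])
			scheduler->engine_owner[ring_id] = workload->vgpu;
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, rq->engine);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:	/* assumed name */
		save_ring_hw_state(workload->vgpu, rq->engine);
		break;
	}
	wake_up(&workload->shadow_ctx_status_wq);
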
346 struct intel_vgpu_workload *workload) in shadow_context_descriptor_update() argument
355 desc |= (u64)workload->ctx_desc.addressing_mode << in shadow_context_descriptor_update()
361 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) in copy_workload_to_ring_buffer() argument
363 struct intel_vgpu *vgpu = workload->vgpu; in copy_workload_to_ring_buffer()
364 struct i915_request *req = workload->req; in copy_workload_to_ring_buffer()
391 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); in copy_workload_to_ring_buffer()
394 workload->rb_len); in copy_workload_to_ring_buffer()
398 shadow_ring_buffer_va = workload->shadow_ring_buffer_va; in copy_workload_to_ring_buffer()
401 workload->shadow_ring_buffer_va = cs; in copy_workload_to_ring_buffer()
404 workload->rb_len); in copy_workload_to_ring_buffer()
406 cs += workload->rb_len / sizeof(u32); in copy_workload_to_ring_buffer()
407 intel_ring_advance(workload->req, cs); in copy_workload_to_ring_buffer()
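
copy_workload_to_ring_buffer() moves the already-scanned guest ring-buffer contents into the i915 request's ring. A condensed restatement of the matched lines, keeping only the intel_ring_begin() error check:

	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* remember where the copy lives so later fixups (e.g. bb_start_cmd_va
	 * at source line 539) point into the request's ring, not the shadow */
	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va, workload->rb_len);
	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);
	return 0;
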
434 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, in set_context_ppgtt_from_shadow() argument
437 struct intel_vgpu_mm *mm = workload->shadow_mm; in set_context_ppgtt_from_shadow()
459 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) in intel_gvt_workload_req_alloc() argument
461 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_workload_req_alloc()
465 if (workload->req) in intel_gvt_workload_req_alloc()
468 rq = i915_request_create(s->shadow[workload->engine->id]); in intel_gvt_workload_req_alloc()
474 workload->req = i915_request_get(rq); in intel_gvt_workload_req_alloc()
486 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) in intel_gvt_scan_and_shadow_workload() argument
488 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_scan_and_shadow_workload()
494 if (workload->shadow) in intel_gvt_scan_and_shadow_workload()
497 if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated)) in intel_gvt_scan_and_shadow_workload()
498 shadow_context_descriptor_update(s->shadow[workload->engine->id], in intel_gvt_scan_and_shadow_workload()
499 workload); in intel_gvt_scan_and_shadow_workload()
501 ret = intel_gvt_scan_and_shadow_ringbuffer(workload); in intel_gvt_scan_and_shadow_workload()
505 if (workload->engine->id == RCS0 && in intel_gvt_scan_and_shadow_workload()
506 workload->wa_ctx.indirect_ctx.size) { in intel_gvt_scan_and_shadow_workload()
507 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
512 workload->shadow = true; in intel_gvt_scan_and_shadow_workload()
516 release_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
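
intel_gvt_scan_and_shadow_workload() is idempotent through workload->shadow, and it refreshes the engine's shadow context descriptor only once per engine via the shadow_ctx_desc_updated bitmap. The control flow of the matched lines reduces to roughly the following (debug prints dropped):

	if (workload->shadow)
		return 0;				/* already scanned and shadowed */

	if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(s->shadow[workload->engine->id],
						 workload);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		return ret;

	/* the indirect-context workaround buffer only exists on the render ring */
	if (workload->engine->id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret) {
			release_shadow_wa_ctx(&workload->wa_ctx);
			return ret;
		}
	}

	workload->shadow = true;
	return 0;
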
520 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
522 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) in prepare_shadow_batch_buffer() argument
524 struct intel_gvt *gvt = workload->vgpu->gvt; in prepare_shadow_batch_buffer()
530 list_for_each_entry(bb, &workload->shadow_bb, list) { in prepare_shadow_batch_buffer()
539 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va in prepare_shadow_batch_buffer()
573 ret = i915_vma_move_to_active(bb->vma, workload->req, in prepare_shadow_batch_buffer()
586 release_shadow_batch_buffer(workload); in prepare_shadow_batch_buffer()
592 struct intel_vgpu_workload *workload = in update_wa_ctx_2_shadow_ctx() local
594 struct i915_request *rq = workload->req; in update_wa_ctx_2_shadow_ctx()
650 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) in update_vreg_in_ctx() argument
652 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) = in update_vreg_in_ctx()
653 workload->rb_start; in update_vreg_in_ctx()
656 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) in release_shadow_batch_buffer() argument
660 if (list_empty(&workload->shadow_bb)) in release_shadow_batch_buffer()
663 bb = list_first_entry(&workload->shadow_bb, in release_shadow_batch_buffer()
666 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { in release_shadow_batch_buffer()
684 intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload) in intel_vgpu_shadow_mm_pin() argument
686 struct intel_vgpu *vgpu = workload->vgpu; in intel_vgpu_shadow_mm_pin()
690 ret = intel_vgpu_pin_mm(workload->shadow_mm); in intel_vgpu_shadow_mm_pin()
696 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || in intel_vgpu_shadow_mm_pin()
697 !workload->shadow_mm->ppgtt_mm.shadowed) { in intel_vgpu_shadow_mm_pin()
698 intel_vgpu_unpin_mm(workload->shadow_mm); in intel_vgpu_shadow_mm_pin()
703 if (!list_empty(&workload->lri_shadow_mm)) { in intel_vgpu_shadow_mm_pin()
704 list_for_each_entry(m, &workload->lri_shadow_mm, in intel_vgpu_shadow_mm_pin()
709 &workload->lri_shadow_mm, in intel_vgpu_shadow_mm_pin()
719 intel_vgpu_unpin_mm(workload->shadow_mm); in intel_vgpu_shadow_mm_pin()
725 intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload) in intel_vgpu_shadow_mm_unpin() argument
729 if (!list_empty(&workload->lri_shadow_mm)) { in intel_vgpu_shadow_mm_unpin()
730 list_for_each_entry(m, &workload->lri_shadow_mm, in intel_vgpu_shadow_mm_unpin()
734 intel_vgpu_unpin_mm(workload->shadow_mm); in intel_vgpu_shadow_mm_unpin()
737 static int prepare_workload(struct intel_vgpu_workload *workload) in prepare_workload() argument
739 struct intel_vgpu *vgpu = workload->vgpu; in prepare_workload()
743 ret = intel_vgpu_shadow_mm_pin(workload); in prepare_workload()
749 update_shadow_pdps(workload); in prepare_workload()
751 set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]); in prepare_workload()
753 ret = intel_vgpu_sync_oos_pages(workload->vgpu); in prepare_workload()
759 ret = intel_vgpu_flush_post_shadow(workload->vgpu); in prepare_workload()
765 ret = copy_workload_to_ring_buffer(workload); in prepare_workload()
771 ret = prepare_shadow_batch_buffer(workload); in prepare_workload()
777 ret = prepare_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
783 if (workload->prepare) { in prepare_workload()
784 ret = workload->prepare(workload); in prepare_workload()
791 release_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
793 release_shadow_batch_buffer(workload); in prepare_workload()
795 intel_vgpu_shadow_mm_unpin(workload); in prepare_workload()
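
prepare_workload() pins and wires up everything dispatch needs, in the order the matches show; on failure the real function unwinds in reverse (release the wa ctx, release the shadow batch buffers, unpin the shadow mm). A condensed version with that unwinding left out:

	ret = intel_vgpu_shadow_mm_pin(workload);	/* pin shadow PPGTT (+ LRI mms) */
	if (ret)
		return ret;

	update_shadow_pdps(workload);			/* refresh PDPs in shadow ctx   */
	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (!ret)
		ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (!ret)
		ret = copy_workload_to_ring_buffer(workload);
	if (!ret)
		ret = prepare_shadow_batch_buffer(workload);
	if (!ret)
		ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (!ret && workload->prepare)
		ret = workload->prepare(workload);	/* per-ring prepare hook        */
	return ret;
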
799 static int dispatch_workload(struct intel_vgpu_workload *workload) in dispatch_workload() argument
801 struct intel_vgpu *vgpu = workload->vgpu; in dispatch_workload()
806 workload->engine->name, workload); in dispatch_workload()
810 ret = intel_gvt_workload_req_alloc(workload); in dispatch_workload()
814 ret = intel_gvt_scan_and_shadow_workload(workload); in dispatch_workload()
818 ret = populate_shadow_context(workload); in dispatch_workload()
820 release_shadow_wa_ctx(&workload->wa_ctx); in dispatch_workload()
824 ret = prepare_workload(workload); in dispatch_workload()
830 rq = fetch_and_zero(&workload->req); in dispatch_workload()
834 if (!IS_ERR_OR_NULL(workload->req)) { in dispatch_workload()
836 workload->engine->name, workload->req); in dispatch_workload()
837 i915_request_add(workload->req); in dispatch_workload()
838 workload->dispatched = true; in dispatch_workload()
842 workload->status = ret; in dispatch_workload()
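
dispatch_workload() stitches the previous pieces together: allocate the i915 request, scan and shadow, populate the shadow context, prepare, then hand the request to i915. Reduced to the order of the matched lines, with locking and debug output omitted:

	ret = intel_gvt_workload_req_alloc(workload);	/* i915_request_create + get */
	if (ret == 0)
		ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret == 0)
		ret = populate_shadow_context(workload);
	if (ret == 0)
		ret = prepare_workload(workload);

	if (ret) {
		rq = fetch_and_zero(&workload->req);	/* drop the half-built request */
		if (rq)
			i915_request_put(rq);
	}
	if (!IS_ERR_OR_NULL(workload->req)) {
		i915_request_add(workload->req);	/* queue on the shadow context */
		workload->dispatched = true;
	}

	workload->status = ret;				/* surfaced by completion path */
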
851 struct intel_vgpu_workload *workload = NULL; in pick_next_workload() local
879 workload = scheduler->current_workload[engine->id]; in pick_next_workload()
881 engine->name, workload); in pick_next_workload()
896 workload = scheduler->current_workload[engine->id]; in pick_next_workload()
898 gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload); in pick_next_workload()
900 atomic_inc(&workload->vgpu->submission.running_workload_num); in pick_next_workload()
903 return workload; in pick_next_workload()
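
pick_next_workload() either returns the workload already marked current for the engine or promotes the head of the scheduled vGPU's queue and bumps that vGPU's running_workload_num. Roughly as below; locking and the "no current vgpu" early-outs are omitted, scheduler->current_vgpu is assumed, and workload_q_head() is taken from the intel_vgpu_queue_workload() match further down:

	if (scheduler->current_workload[engine->id])
		return scheduler->current_workload[engine->id];	/* still in flight */

	if (list_empty(workload_q_head(scheduler->current_vgpu, engine)))
		return NULL;

	scheduler->current_workload[engine->id] =
		list_first_entry(workload_q_head(scheduler->current_vgpu, engine),
				 struct intel_vgpu_workload, list);
	workload = scheduler->current_workload[engine->id];
	atomic_inc(&workload->vgpu->submission.running_workload_num);
	return workload;
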
936 static void update_guest_context(struct intel_vgpu_workload *workload) in update_guest_context() argument
938 struct i915_request *rq = workload->req; in update_guest_context()
939 struct intel_vgpu *vgpu = workload->vgpu; in update_guest_context()
941 struct intel_context *ctx = workload->req->context; in update_guest_context()
953 workload->ctx_desc.lrca); in update_guest_context()
957 head = workload->rb_head; in update_guest_context()
958 tail = workload->rb_tail; in update_guest_context()
959 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF; in update_guest_context()
989 (u32)((workload->ctx_desc.lrca + i) << in update_guest_context()
1016 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + in update_guest_context()
1017 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); in update_guest_context()
1021 if (!list_empty(&workload->lri_shadow_mm)) { in update_guest_context()
1022 struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm, in update_guest_context()
1026 update_guest_pdps(vgpu, workload->ring_context_gpa, in update_guest_context()
1031 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \ in update_guest_context()
1040 workload->ring_context_gpa + in update_guest_context()
1069 struct intel_vgpu_workload *workload = in complete_current_workload() local
1071 struct intel_vgpu *vgpu = workload->vgpu; in complete_current_workload()
1073 struct i915_request *rq = workload->req; in complete_current_workload()
1084 wait_event(workload->shadow_ctx_status_wq, in complete_current_workload()
1085 !atomic_read(&workload->shadow_ctx_active)); in complete_current_workload()
1092 if (likely(workload->status == -EINPROGRESS)) { in complete_current_workload()
1093 if (workload->req->fence.error == -EIO) in complete_current_workload()
1094 workload->status = -EIO; in complete_current_workload()
1096 workload->status = 0; in complete_current_workload()
1099 if (!workload->status && in complete_current_workload()
1101 update_guest_context(workload); in complete_current_workload()
1103 for_each_set_bit(event, workload->pending_events, in complete_current_workload()
1108 i915_request_put(fetch_and_zero(&workload->req)); in complete_current_workload()
1112 ring_id, workload, workload->status); in complete_current_workload()
1116 list_del_init(&workload->list); in complete_current_workload()
1118 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { in complete_current_workload()
1135 workload->complete(workload); in complete_current_workload()
1137 intel_vgpu_shadow_mm_unpin(workload); in complete_current_workload()
1138 intel_vgpu_destroy_workload(workload); in complete_current_workload()
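
complete_current_workload() is the back half of the lifecycle: wait until the shadow context is fully switched out, fold the fence error into workload->status, write the shadow context back to the guest unless the engine is being reset, then tear the workload down. The essential sequence (the event constants and trigger helper are assumptions, not part of the matches above):

	wait_event(workload->shadow_ctx_status_wq,
		   !atomic_read(&workload->shadow_ctx_active));

	if (likely(workload->status == -EINPROGRESS))
		workload->status = (workload->req->fence.error == -EIO) ? -EIO : 0;

	if (!workload->status && !(vgpu->resetting_eng & BIT(ring_id))) {
		update_guest_context(workload);		/* shadow ctx -> guest GPA    */
		for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX)
			intel_vgpu_trigger_virtual_event(vgpu, event);
	}

	i915_request_put(fetch_and_zero(&workload->req));
	list_del_init(&workload->list);
	workload->complete(workload);			/* per-ring completion hook   */
	intel_vgpu_shadow_mm_unpin(workload);
	intel_vgpu_destroy_workload(workload);
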
1156 struct intel_vgpu_workload *workload = NULL; in workload_thread() local
1168 workload = pick_next_workload(gvt, engine); in workload_thread()
1169 if (workload) in workload_thread()
1176 if (!workload) in workload_thread()
1180 engine->name, workload, in workload_thread()
1181 workload->vgpu->id); in workload_thread()
1186 engine->name, workload); in workload_thread()
1197 update_vreg_in_ctx(workload); in workload_thread()
1199 ret = dispatch_workload(workload); in workload_thread()
1202 vgpu = workload->vgpu; in workload_thread()
1208 engine->name, workload); in workload_thread()
1209 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); in workload_thread()
1213 workload, workload->status); in workload_thread()
1517 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) in intel_vgpu_destroy_workload() argument
1519 struct intel_vgpu_submission *s = &workload->vgpu->submission; in intel_vgpu_destroy_workload()
1521 intel_context_unpin(s->shadow[workload->engine->id]); in intel_vgpu_destroy_workload()
1522 release_shadow_batch_buffer(workload); in intel_vgpu_destroy_workload()
1523 release_shadow_wa_ctx(&workload->wa_ctx); in intel_vgpu_destroy_workload()
1525 if (!list_empty(&workload->lri_shadow_mm)) { in intel_vgpu_destroy_workload()
1527 list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm, in intel_vgpu_destroy_workload()
1534 GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm)); in intel_vgpu_destroy_workload()
1535 if (workload->shadow_mm) in intel_vgpu_destroy_workload()
1536 intel_vgpu_mm_put(workload->shadow_mm); in intel_vgpu_destroy_workload()
1538 kmem_cache_free(s->workloads, workload); in intel_vgpu_destroy_workload()
1545 struct intel_vgpu_workload *workload; in alloc_workload() local
1547 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL); in alloc_workload()
1548 if (!workload) in alloc_workload()
1551 INIT_LIST_HEAD(&workload->list); in alloc_workload()
1552 INIT_LIST_HEAD(&workload->shadow_bb); in alloc_workload()
1553 INIT_LIST_HEAD(&workload->lri_shadow_mm); in alloc_workload()
1555 init_waitqueue_head(&workload->shadow_ctx_status_wq); in alloc_workload()
1556 atomic_set(&workload->shadow_ctx_active, 0); in alloc_workload()
1558 workload->status = -EINPROGRESS; in alloc_workload()
1559 workload->vgpu = vgpu; in alloc_workload()
1561 return workload; in alloc_workload()
1580 static int prepare_mm(struct intel_vgpu_workload *workload) in prepare_mm() argument
1582 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; in prepare_mm()
1584 struct intel_vgpu *vgpu = workload->vgpu; in prepare_mm()
1600 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); in prepare_mm()
1602 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); in prepare_mm()
1606 workload->shadow_mm = mm; in prepare_mm()
1634 struct intel_vgpu_workload *workload = NULL; in intel_vgpu_create_workload() local
1690 workload = alloc_workload(vgpu); in intel_vgpu_create_workload()
1691 if (IS_ERR(workload)) in intel_vgpu_create_workload()
1692 return workload; in intel_vgpu_create_workload()
1694 workload->engine = engine; in intel_vgpu_create_workload()
1695 workload->ctx_desc = *desc; in intel_vgpu_create_workload()
1696 workload->ring_context_gpa = ring_context_gpa; in intel_vgpu_create_workload()
1697 workload->rb_head = head; in intel_vgpu_create_workload()
1698 workload->guest_rb_head = guest_head; in intel_vgpu_create_workload()
1699 workload->rb_tail = tail; in intel_vgpu_create_workload()
1700 workload->rb_start = start; in intel_vgpu_create_workload()
1701 workload->rb_ctl = ctl; in intel_vgpu_create_workload()
1709 workload->wa_ctx.indirect_ctx.guest_gma = in intel_vgpu_create_workload()
1711 workload->wa_ctx.indirect_ctx.size = in intel_vgpu_create_workload()
1715 if (workload->wa_ctx.indirect_ctx.size != 0) { in intel_vgpu_create_workload()
1717 workload->wa_ctx.indirect_ctx.guest_gma, in intel_vgpu_create_workload()
1718 workload->wa_ctx.indirect_ctx.size)) { in intel_vgpu_create_workload()
1720 workload->wa_ctx.indirect_ctx.guest_gma); in intel_vgpu_create_workload()
1721 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1726 workload->wa_ctx.per_ctx.guest_gma = in intel_vgpu_create_workload()
1728 workload->wa_ctx.per_ctx.valid = per_ctx & 1; in intel_vgpu_create_workload()
1729 if (workload->wa_ctx.per_ctx.valid) { in intel_vgpu_create_workload()
1731 workload->wa_ctx.per_ctx.guest_gma, in intel_vgpu_create_workload()
1734 workload->wa_ctx.per_ctx.guest_gma); in intel_vgpu_create_workload()
1735 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1742 workload, engine->name, head, tail, start, ctl); in intel_vgpu_create_workload()
1744 ret = prepare_mm(workload); in intel_vgpu_create_workload()
1746 kmem_cache_free(s->workloads, workload); in intel_vgpu_create_workload()
1757 ret = intel_gvt_scan_and_shadow_workload(workload); in intel_vgpu_create_workload()
1763 intel_vgpu_destroy_workload(workload); in intel_vgpu_create_workload()
1769 intel_vgpu_destroy_workload(workload); in intel_vgpu_create_workload()
1773 return workload; in intel_vgpu_create_workload()
1780 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) in intel_vgpu_queue_workload() argument
1782 list_add_tail(&workload->list, in intel_vgpu_queue_workload()
1783 workload_q_head(workload->vgpu, workload->engine)); in intel_vgpu_queue_workload()
1784 intel_gvt_kick_schedule(workload->vgpu->gvt); in intel_vgpu_queue_workload()
1785 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]); in intel_vgpu_queue_workload()
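
Taken together, the matches trace one workload's life from the guest's ELSP write to retirement. A schematic of that path in the order the functions above run; argument lists are abbreviated from memory, and locking, scheduling policy and error paths are omitted:

	/* submission side (execlist/ELSP emulation) */
	workload = intel_vgpu_create_workload(vgpu, engine, desc);	/* parse ctx, prepare_mm, scan  */
	intel_vgpu_queue_workload(workload);		/* enqueue, kick scheduler, wake waitq  */

	/* per-engine workload_thread() */
	workload = pick_next_workload(gvt, engine);
	update_vreg_in_ctx(workload);			/* mirror RING_START into the vreg      */
	dispatch_workload(workload);			/* shadow + prepare + i915_request_add  */
	i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
	complete_current_workload(gvt, engine->id);	/* write back, signal events, destroy   */
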