xref: /openbmc/linux/drivers/gpu/drm/i915/gvt/scheduler.c (revision 4da722ca)
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

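/* Write the shadow PDP root pointers into the ring context. The eight
 * 32-bit entries are copied in reverse order so that they line up with
 * the pdp3_UDW..pdp0_LDW register pairs of the execlist ring context.
 */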
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}

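/* populate_shadow_context - copy the guest ring context into the shadow
 * context before submission. The guest context pages are read through the
 * hypervisor interface, selected ring context registers are copied over,
 * and the PDP root pointers are replaced with the shadow page table roots.
 */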
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EINVAL;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
				     workload->shadow_mm->shadow_page_table);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
	return 0;
}

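/* Requests submitted on behalf of GVT use contexts created with the
 * force-single-submission flag, so that flag is used to tell GVT requests
 * apart from host i915 requests.
 */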
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}

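/* Notifier callback invoked on context schedule-in/schedule-out events.
 * For host-owned requests it switches the engine MMIO state back to the
 * host when needed; for GVT requests it performs the vGPU MMIO context
 * switch and tracks whether the shadow context is active on hardware.
 */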
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;

	if (!is_gvt_request(req)) {
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_bh(&scheduler->mmio_context_lock);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_bh(&scheduler->mmio_context_lock);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_bh(&scheduler->mmio_context_lock);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

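/* dispatch_workload - submit one vGPU workload to i915. The shadow context
 * is pinned, a request is allocated, the guest ring buffer and (for RCS)
 * the indirect/wa context are scanned and shadowed, the shadow context is
 * populated from the guest, and the request is finally added.
 */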
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
	struct drm_i915_gem_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_ring *ring;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	mutex_lock(&dev_priv->drm.struct_mutex);

	/* Pin the shadow context in GVT even though i915 will also pin it
	 * when allocating the request. GVT updates the guest context from
	 * the shadow context after the workload completes, and by then
	 * i915 may already have unpinned the shadow context, leaving the
	 * shadow_ctx pages invalid. So GVT must hold its own pin, which it
	 * can release only after the guest context has been updated.
	 */
	ring = engine->context_pin(engine, shadow_ctx);
	if (IS_ERR(ring)) {
		ret = PTR_ERR(ring);
		gvt_vgpu_err("fail to pin shadow context\n");
		workload->status = ret;
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return ret;
	}

	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto out;
	}

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

	workload->req = i915_gem_request_get(rq);

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	if ((workload->ring_id == RCS) &&
	    (workload->wa_ctx.indirect_ctx.size != 0)) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto out;
	}

	ret = populate_shadow_context(workload);
	if (ret)
		goto out;

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto out;
	}

	gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			ring_id, workload->req);

	ret = 0;
	workload->dispatched = true;
out:
	if (ret)
		workload->status = ret;

	if (!IS_ERR_OR_NULL(rq))
		i915_add_request(rq);
	else
		engine->context_unpin(engine, shadow_ctx);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

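/* pick_next_workload - select the next workload for a ring from the
 * current vgpu's queue and mark it as the current workload. Returns NULL
 * when there is nothing to run or the scheduler is about to reschedule.
 */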
static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->lock);

	/*
	 * Bail out if there is no current vgpu, if a reschedule is
	 * pending, or if the current vgpu has no queued workload.
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * A current workload still exists: the workload dispatcher may
	 * have failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * Pick a workload as the current workload. Once it is set, the
	 * scheduling policy routines will wait for it to finish before
	 * scheduling out the vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->running_workload_num);
out:
	mutex_unlock(&gvt->lock);
	return workload;
}

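/* update_guest_context - write the shadow context back into the guest
 * context after the workload completes, including the context pages,
 * the ring header and selected ring context registers.
 */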
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
	struct drm_i915_gem_object *ctx_obj =
		shadow_ctx->engine[ring_id].state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

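/* complete_current_workload - finish the current workload on a ring:
 * wait for the shadow context to be switched out, propagate the request
 * status, copy the shadow context back to the guest, deliver pending
 * virtual events, and hand the workload back to its completion handler.
 */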
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	/* A workload with a request must wait for the context switch to
	 * make sure the request has completed.
	 * A workload without a request is completed directly.
	 */
	if (workload->req) {
		struct drm_i915_private *dev_priv =
			workload->vgpu->gvt->dev_priv;
		struct intel_engine_cs *engine =
			dev_priv->engine[workload->ring_id];
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error is
		 * set to -EIO. Propagate -EIO to the workload status so
		 * that a hanging request does not trigger a context
		 * switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !vgpu->resetting) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}
		mutex_lock(&dev_priv->drm.struct_mutex);
		/* unpin shadow ctx as the shadow_ctx update is done */
		engine->context_unpin(engine, workload->vgpu->shadow_ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

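/* Per-ring scheduler thread: waits for workloads on its ring, dispatches
 * them to i915 while holding a runtime PM (and, on SKL/KBL, forcewake)
 * reference, waits for the request to complete and then completes the
 * workload.
 */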
static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(gvt->dev_priv,
					FORCEWAKE_ALL);

		mutex_lock(&gvt->lock);
		ret = dispatch_workload(workload);
		mutex_unlock(&gvt->lock);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(gvt->dev_priv,
					FORCEWAKE_ALL);

		intel_runtime_pm_put(gvt->dev_priv);
	}
	return 0;
}

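/* Wait until the vGPU has no workload still running on any ring. */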
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&vgpu->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&vgpu->running_workload_num));
	}
}

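/* Tear down the workload scheduler: unregister the context status
 * notifiers and stop the per-ring workload threads.
 */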
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

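/* Set up the workload scheduler: create one workload thread per engine
 * and register a context status notifier for each of them.
 */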
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

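/* Release the vGPU's shadow context. */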
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

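/* Create the per-vGPU shadow context used to submit guest workloads to
 * the host i915 driver.
 */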
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
	atomic_set(&vgpu->running_workload_num, 0);

	vgpu->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(vgpu->shadow_ctx))
		return PTR_ERR(vgpu->shadow_ctx);

	vgpu->shadow_ctx->engine[RCS].initialised = true;

	return 0;
}