/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	int i;

	for (i = 0; i < 8; i++)
		ring_context->pdps[i].val = pdp[7 - i];
}

static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct execlist_ring_context *shadow_ring_context;
	struct intel_context *ctx = workload->req->context;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
	set_context_pdp_root_pointer(shadow_ring_context,
		(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
}

/*
 * When populating the shadow ctx from the guest, we should not override the
 * OA related registers, so that they will not be overwritten by the guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and the guests.
 */
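/*
 * Save (@save == true) the OA context control and flex EU counter values
 * from the shadow context image into the workload, or write them back
 * (@save == false) into @reg_state. Only the render ring (RCS0) is
 * affected.
 */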
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		       u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
	u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (workload->engine->id != RCS0)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_context *ctx = workload->req->context;
	struct execlist_ring_context *shadow_ring_context;
	void *dst;
	void *context_base;
	unsigned long context_gpa, context_page_num;
	unsigned long gpa_base; /* first gpa of consecutive GPAs */
	unsigned long gpa_size; /* size of consecutive GPAs */
	struct intel_vgpu_submission *s = &vgpu->submission;
	int i;
	bool skip = false;
	int ring_id = workload->engine->id;

	GEM_BUG_ON(!intel_context_is_pinned(ctx));

	context_base = (void *) ctx->lrc_reg_state -
			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

	shadow_ring_context = (void *) ctx->lrc_reg_state;

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (workload->engine->id == RCS0) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG
#undef COPY_REG_MASKED

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);

	gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
			workload->engine->name, workload->ctx_desc.lrca,
			workload->ctx_desc.context_id,
			workload->ring_context_gpa);

	/* only need to ensure this context is not pinned/unpinned during the
	 * period from the last submission to this submission.
	 * Upon reaching this function, the currently submitted context is not
	 * supposed to get unpinned. If a misbehaving guest driver ever does
	 * this, it would corrupt itself.
	 */
	if (s->last_ctx[ring_id].valid &&
			(s->last_ctx[ring_id].lrca ==
					workload->ctx_desc.lrca) &&
			(s->last_ctx[ring_id].ring_context_gpa ==
					workload->ring_context_gpa))
		skip = true;

	s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
	s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;

	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
		return 0;

	s->last_ctx[ring_id].valid = false;
	context_page_num = workload->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
		context_page_num = 19;

	/* find consecutive GPAs from gma until the first non-consecutive GPA.
	 * read from the consecutive GPAs into dst virtual address
	 */
	gpa_size = 0;
	for (i = 2; i < context_page_num; i++) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		if (gpa_size == 0) {
			gpa_base = context_gpa;
			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
		} else if (context_gpa != gpa_base + gpa_size)
			goto read;

		gpa_size += I915_GTT_PAGE_SIZE;

		if (i == context_page_num - 1)
			goto read;

		continue;

read:
		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
		gpa_base = context_gpa;
		gpa_size = I915_GTT_PAGE_SIZE;
		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
	}
	s->last_ctx[ring_id].valid = true;
	return 0;
}

static inline bool is_gvt_request(struct i915_request *rq)
{
	return intel_context_force_single_submission(rq->context);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu,
			       const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t reg;

	reg = RING_INSTDONE(engine->mmio_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
		intel_uncore_read(uncore, reg);

	reg = RING_ACTHD(engine->mmio_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
		intel_uncore_read(uncore, reg);

	reg = RING_ACTHD_UDW(engine->mmio_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
		intel_uncore_read(uncore, reg);
}

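/*
 * Notifier callback hooked into the engine's context status chain: for
 * host-owned requests it switches the engine MMIO state back to the host,
 * and for GVT-owned requests it tracks the shadow context scheduling state
 * and saves ring HW state for the current workload.
 */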
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *rq = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[rq->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = rq->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(rq)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, rq->engine);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, rq->engine);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, rq->engine);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, rq->engine);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void
shadow_context_descriptor_update(struct intel_context *ce,
				 struct intel_vgpu_workload *workload)
{
	u64 desc = ce->lrc.desc;

	/*
	 * Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	desc |= (u64)workload->ctx_desc.addressing_mode <<
		GEN8_CTX_ADDRESSING_MODE_SHIFT;

	ce->lrc.desc = desc;
}

static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;
	int err;

	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/*
	 * To track whether a request has started on HW, we can emit a
	 * breadcrumb at the beginning of the request and check its
	 * timeline's HWSP to see if the breadcrumb has advanced past the
	 * start of this request. Actually, the request must have the
	 * init_breadcrumb if its timeline has has_init_breadcrumb set, or
	 * the scheduler might get a wrong state of it during reset. Since
	 * the requests from gvt always set the has_init_breadcrumb flag, we
	 * need to do the emit_init_breadcrumb for all the requests.
	 */
	if (req->engine->emit_init_breadcrumb) {
		err = req->engine->emit_init_breadcrumb(req);
		if (err) {
			gvt_vgpu_err("fail to emit init breadcrumb\n");
			return err;
		}
	}

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);

	wa_ctx->indirect_ctx.obj = NULL;
	wa_ctx->indirect_ctx.shadow_va = NULL;
}

static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
					  struct intel_context *ce)
{
	struct intel_vgpu_mm *mm = workload->shadow_mm;
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
	int i = 0;

	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
	} else {
		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
		}
	}
}

static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq;

	if (workload->req)
		return 0;

	rq = i915_request_create(s->shadow[workload->engine->id]);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		return PTR_ERR(rq);
	}

	workload->req = i915_request_get(rq);
	return 0;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int ret;

	lockdep_assert_held(&vgpu->vgpu_lock);

	if (workload->shadow)
		return 0;

	if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(s->shadow[workload->engine->id],
						 workload);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		return ret;

	if (workload->engine->id == RCS0 &&
	    workload->wa_ctx.indirect_ctx.size) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_shadow;
	}

	workload->shadow = true;
	return 0;

err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

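/*
 * Pin and fix up the shadow batch buffers attached to the workload:
 * privileged batch buffers are pinned into the GGTT and their batch buffer
 * start command is relocated to the shadow copy's GGTT offset, while
 * non-privileged (ppgtt) batch buffers keep running from the original
 * guest address.
 */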
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		/* For privileged batch buffers (not wa_ctx), bb_start_cmd_va
		 * points into ring_scan_buffer, not the real ring address
		 * allocated later in copy_workload_to_ring_buffer. Note that
		 * shadow_ring_buffer_va points to the real ring buffer va
		 * after copy_workload_to_ring_buffer.
		 */

		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (bb->ppgtt) {
			/* For a non-privileged bb, scan & shadow is only for
			 * debugging purposes, so the content of the shadow bb
			 * is the same as the original bb. Therefore, rather
			 * than switching to the shadow bb's gma address, we
			 * directly use the original batch buffer's gma
			 * address, and send the original bb to hardware
			 * directly.
			 */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						       bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;

		} else {
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
							   NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
				ret = PTR_ERR(bb->vma);
				goto err;
			}

			/* relocate shadow batch buffer */
			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			/* No one is going to touch shadow bb from now on. */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						       bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}

			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
								false);
			if (ret)
				goto err;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req,
						      0);
			if (ret)
				goto err;

			i915_gem_object_finish_access(bb->obj);
			bb->accessing = false;
		}
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
	vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
		workload->rb_start;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			      struct intel_vgpu_shadow_bb, list);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_object_finish_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma))
				i915_vma_unpin(bb->vma);

			i915_gem_object_put(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}
}

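/*
 * Pin the workload's shadow PPGTT, plus any shadow PPGTTs referenced by
 * LRI commands in the workload, so their shadow page tables stay resident
 * while the workload runs. intel_vgpu_shadow_mm_unpin() is the counterpart.
 */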
static int
intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_mm *m;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
	    !workload->shadow_mm->ppgtt_mm.shadowed) {
		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
		return -EINVAL;
	}

	if (!list_empty(&workload->lri_shadow_mm)) {
		list_for_each_entry(m, &workload->lri_shadow_mm,
				    ppgtt_mm.link) {
			ret = intel_vgpu_pin_mm(m);
			if (ret) {
				list_for_each_entry_from_reverse(m,
								 &workload->lri_shadow_mm,
								 ppgtt_mm.link)
					intel_vgpu_unpin_mm(m);
				gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
				break;
			}
		}
	}

	if (ret)
		intel_vgpu_unpin_mm(workload->shadow_mm);

	return ret;
}

static void
intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_mm *m;

	if (!list_empty(&workload->lri_shadow_mm)) {
		list_for_each_entry(m, &workload->lri_shadow_mm,
				    ppgtt_mm.link)
			intel_vgpu_unpin_mm(m);
	}
	intel_vgpu_unpin_mm(workload->shadow_mm);
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int ret = 0;

	ret = intel_vgpu_shadow_mm_pin(workload);
	if (ret) {
		gvt_vgpu_err("fail to pin shadow mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = copy_workload_to_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_shadow_mm_unpin(workload);
	return ret;
}

static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *rq;
	int ret;

	gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
		      workload->engine->name, workload);

	mutex_lock(&vgpu->vgpu_lock);

	ret = intel_gvt_workload_req_alloc(workload);
	if (ret)
		goto err_req;

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = populate_shadow_context(workload);
	if (ret) {
		release_shadow_wa_ctx(&workload->wa_ctx);
		goto out;
	}

	ret = prepare_workload(workload);
out:
	if (ret) {
		/* We might still need to add a request with
		 * a clean ctx to retire it properly.
		 */
		rq = fetch_and_zero(&workload->req);
		i915_request_put(rq);
	}

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
			      workload->engine->name, workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
	}
err_req:
	if (ret)
		workload->status = ret;
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

static struct intel_vgpu_workload *
pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->sched_lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
		goto out;
	}

	if (!scheduler->current_vgpu->active ||
	    list_empty(workload_q_head(scheduler->current_vgpu, engine)))
		goto out;

	/*
	 * still have a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[engine->id]) {
		workload = scheduler->current_workload[engine->id];
		gvt_dbg_sched("ring %s still have current workload %p\n",
			      engine->name, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, the scheduling policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[engine->id] =
		list_first_entry(workload_q_head(scheduler->current_vgpu,
						 engine),
				 struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[engine->id];

	gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->sched_lock);
	return workload;
}

static void update_guest_pdps(struct intel_vgpu *vgpu,
			      u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_write_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

static __maybe_unused bool
check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
{
	if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;

		if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
			gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
			return false;
		}
		return true;
	} else {
		/* see comment in LRI handler in cmd_parser.c */
		gvt_dbg_mm("invalid shadow mm type\n");
		return false;
	}
}

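/*
 * Write the results of a completed workload back to the guest: the ring
 * head and tail registers, the guest PDPs for any PPGTT switched via LRI,
 * and the shadow context image pages.
 */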
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct execlist_ring_context *shadow_ring_context;
	struct intel_context *ctx = workload->req->context;
	void *context_base;
	void *src;
	unsigned long context_gpa, context_page_num;
	unsigned long gpa_base; /* first gpa of consecutive GPAs */
	unsigned long gpa_size; /* size of consecutive GPAs */
	int i;
	u32 ring_base;
	u32 head, tail;
	u16 wrap_count;

	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	GEM_BUG_ON(!intel_context_is_pinned(ctx));

	head = workload->rb_head;
	tail = workload->rb_tail;
	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

	if (tail < head) {
		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
			wrap_count = 0;
		else
			wrap_count += 1;
	}

	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;

	ring_base = rq->engine->mmio_base;
	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
		context_page_num = 19;

	context_base = (void *) ctx->lrc_reg_state -
			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

	/* find consecutive GPAs from gma until the first non-consecutive GPA.
	 * write to the consecutive GPAs from src virtual address
	 */
	gpa_size = 0;
	for (i = 2; i < context_page_num; i++) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		if (gpa_size == 0) {
			gpa_base = context_gpa;
			src = context_base + (i << I915_GTT_PAGE_SHIFT);
		} else if (context_gpa != gpa_base + gpa_size)
			goto write;

		gpa_size += I915_GTT_PAGE_SIZE;

		if (i == context_page_num - 1)
			goto write;

		continue;

write:
		intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
		gpa_base = context_gpa;
		gpa_size = I915_GTT_PAGE_SIZE;
		src = context_base + (i << I915_GTT_PAGE_SHIFT);
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	shadow_ring_context = (void *) ctx->lrc_reg_state;

	if (!list_empty(&workload->lri_shadow_mm)) {
		struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
							  struct intel_vgpu_mm,
							  ppgtt_mm.link);
		GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
		update_guest_pdps(vgpu, workload->ring_context_gpa,
				  (void *)m->ppgtt_mm.guest_pdps);
	}

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
}

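/**
 * intel_vgpu_clean_workloads - free queued workloads of a vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines whose workload queues should be emptied
 *
 * Destroy every workload still queued for @vgpu on the engines selected by
 * @engine_mask, and clear the per-engine shadow context descriptor update
 * bits so the descriptors are refreshed on the next submission.
 */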
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	intel_engine_mask_t tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
					 &s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq = workload->req;
	int event;

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&gvt->sched_lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that,
		 * when this request caused a GPU hang, we do not trigger a
		 * context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		if (!workload->status &&
		    !(vgpu->resetting_eng & BIT(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}

		i915_request_put(fetch_and_zero(&workload->req));
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
		      ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
		/* If workload->status is not successful, it means the HW GPU
		 * hit a hang or something went wrong with i915/GVT, and GVT
		 * won't inject a context switch interrupt to the guest. So
		 * from the guest's point of view this error is actually a
		 * vGPU hang, and accordingly we should emulate a vGPU hang.
		 * If there are pending workloads which were already submitted
		 * from the guest, we should clean them up like the HW GPU
		 * does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the resetting process later, so doing
		 * the workload clean up here doesn't have any impact.
		 */
		intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
	}

	workload->complete(workload);

	intel_vgpu_shadow_mm_unpin(workload);
	intel_vgpu_destroy_workload(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}

static int workload_thread(void *arg)
{
	struct intel_engine_cs *engine = arg;
	const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	gvt_dbg_core("workload thread for ring %s started\n", engine->name);

	while (!kthread_should_stop()) {
		intel_wakeref_t wakeref;

		add_wait_queue(&scheduler->waitq[engine->id], &wait);
		do {
			workload = pick_next_workload(gvt, engine);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[engine->id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
			      engine->name, workload,
			      workload->vgpu->id);

		wakeref = intel_runtime_pm_get(engine->uncore->rpm);

		gvt_dbg_sched("ring %s will dispatch workload %p\n",
			      engine->name, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(engine->uncore,
						   FORCEWAKE_ALL);
		/*
		 * Update the vReg of the vGPU which submitted this
		 * workload. The vGPU may use these registers for checking
		 * the context state. The value comes from GPU commands
		 * in this workload.
		 */
		update_vreg_in_ctx(workload);

		ret = dispatch_workload(workload);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring %s wait workload %p\n",
			      engine->name, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
			      workload, workload->status);

		complete_current_workload(gvt, engine->id);

		if (need_force_wake)
			intel_uncore_forcewake_put(engine->uncore,
						   FORCEWAKE_ALL);

		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->gt, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->gt, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		scheduler->thread[i] = kthread_run(workload_thread, engine,
						   "gvt:%s", engine->name);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					       &gvt->shadow_ctx_notifier_block[i]);
	}

	return 0;

err:
	intel_gvt_clean_workload_scheduler(gvt);
	return ret;
}

static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
				struct i915_ppgtt *ppgtt)
{
	int i;

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		px_dma(ppgtt->pd) = s->i915_context_pml4;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			px_dma(pd) = s->i915_context_pdps[i];
		}
	}
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
	for_each_engine(engine, vgpu->gvt->gt, id)
		intel_context_unpin(s->shadow[id]);

	kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	intel_vgpu_clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
			     struct i915_ppgtt *ppgtt)
{
	int i;

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		s->i915_context_pml4 = px_dma(ppgtt->pd);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			s->i915_context_pdps[i] = px_dma(pd);
		}
	}
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	struct i915_ppgtt *ppgtt;
	enum intel_engine_id i;
	int ret;

	ppgtt = i915_ppgtt_create(&i915->gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	i915_context_ppgtt_root_save(s, ppgtt);

	for_each_engine(engine, vgpu->gvt->gt, i) {
		struct intel_context *ce;

		INIT_LIST_HEAD(&s->workload_q_head[i]);
		s->shadow[i] = ERR_PTR(-EINVAL);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_shadow_ctx;
		}

		i915_vm_put(ce->vm);
		ce->vm = i915_vm_get(&ppgtt->vm);
		intel_context_set_single_submission(ce);

		/* Max ring buffer size */
		if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
			const unsigned int ring_size = 512 * SZ_4K;

			ce->ring = __intel_context_ring_size(ring_size);
		}

		ret = intel_context_pin(ce);
		intel_context_put(ce);
		if (ret)
			goto out_shadow_ctx;

		s->shadow[i] = ce;
	}

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	memset(s->last_ctx, 0, sizeof(s->last_ctx));

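	/*
	 * Each shadow context above took its own reference to ppgtt->vm, so
	 * drop the local reference obtained from i915_ppgtt_create().
	 */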
	i915_vm_put(&ppgtt->vm);
	return 0;

out_shadow_ctx:
	i915_context_ppgtt_root_restore(s, ppgtt);
	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (IS_ERR(s->shadow[i]))
			break;

		intel_context_unpin(s->shadow[i]);
		intel_context_put(s->shadow[i]);
	}
	i915_vm_put(&ppgtt->vm);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when the guest configures the submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (drm_WARN_ON(&i915->drm,
			interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
		     vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (!list_empty(&workload->lri_shadow_mm)) {
		struct intel_vgpu_mm *m, *mm;

		list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
					 ppgtt_mm.link) {
			list_del(&m->ppgtt_mm.link);
			intel_vgpu_mm_put(m);
		}
	}

	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);
	INIT_LIST_HEAD(&workload->lri_shadow_mm);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

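/*
 * Resolve the guest PPGTT referenced by the context descriptor: read the
 * guest PDPs from the ring context, derive the root entry type from the
 * descriptor's addressing mode, and attach the matching shadow mm to the
 * workload.
 */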
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	enum intel_gvt_gtt_type root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];

	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @engine: the engine
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, engine);
	struct intel_vgpu_workload *last_workload = NULL;
	struct intel_vgpu_workload *workload = NULL;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	u32 guest_head;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	guest_head = head;

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	list_for_each_entry_reverse(last_workload, q, list) {

		if (same_context(&last_workload->ctx_desc, desc)) {
			gvt_dbg_el("ring %s cur workload == last\n",
				   engine->name);
			gvt_dbg_el("ctx head %x real head %lx\n", head,
				   last_workload->rb_tail);
			/*
			 * cannot use guest context head pointer here,
			 * as it might not be updated at this time
			 */
			head = last_workload->rb_tail;
			break;
		}
	}

	gvt_dbg_el("ring %s begin a new workload\n", engine->name);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	if (!intel_gvt_ggtt_validate_range(vgpu, start,
				_RING_CTL_BUF_SIZE(ctl))) {
		gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
		return ERR_PTR(-EINVAL);
	}

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->engine = engine;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->guest_rb_head = guest_head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (engine->id == RCS0) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
				RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
				RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;

		if (workload->wa_ctx.indirect_ctx.size != 0) {
			if (!intel_gvt_ggtt_validate_range(vgpu,
				workload->wa_ctx.indirect_ctx.guest_gma,
				workload->wa_ctx.indirect_ctx.size)) {
				gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
				    workload->wa_ctx.indirect_ctx.guest_gma);
				kmem_cache_free(s->workloads, workload);
				return ERR_PTR(-EINVAL);
			}
		}

		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
		if (workload->wa_ctx.per_ctx.valid) {
			if (!intel_gvt_ggtt_validate_range(vgpu,
				workload->wa_ctx.per_ctx.guest_gma,
				CACHELINE_BYTES)) {
				gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
					     workload->wa_ctx.per_ctx.guest_gma);
				kmem_cache_free(s->workloads, workload);
				return ERR_PTR(-EINVAL);
			}
		}
	}

	gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
		   workload, engine->name, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(q)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
			ret = intel_gvt_scan_and_shadow_workload(workload);
	}

	if (ret) {
		if (vgpu_is_vm_unhealthy(ret))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		      workload_q_head(workload->vgpu, workload->engine));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}