/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_perf_oa_regs.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	int i;

	for (i = 0; i < 8; i++)
		ring_context->pdps[i].val = pdp[7 - i];
}

static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct execlist_ring_context *shadow_ring_context;
	struct intel_context *ctx = workload->req->context;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
	set_context_pdp_root_pointer(shadow_ring_context,
			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
}

/*
 * When populating the shadow context from the guest, we should not override
 * OA related registers, so that guest OA configs do not clobber them. This
 * makes it possible to capture OA data from the host for both host and guests.
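 *
 * sr_oa_regs() below implements this: with save == true it stashes
 * OACTXCONTROL and the EU_PERF_CNTL flex registers from the shadow context
 * image before guest state is copied in, and with save == false it writes
 * the saved values back afterwards.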
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		       u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
	u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (workload->engine->id != RCS0)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_context *ctx = workload->req->context;
	struct execlist_ring_context *shadow_ring_context;
	void *dst;
	void *context_base;
	unsigned long context_gpa, context_page_num;
	unsigned long gpa_base; /* first gpa of consecutive GPAs */
	unsigned long gpa_size; /* size of consecutive GPAs */
	struct intel_vgpu_submission *s = &vgpu->submission;
	int i;
	bool skip = false;
	int ring_id = workload->engine->id;
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(ctx));

	context_base = (void *) ctx->lrc_reg_state -
				(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

	shadow_ring_context = (void *) ctx->lrc_reg_state;

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (workload->engine->id == RCS0) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	} else if (workload->engine->id == BCS0)
		intel_gvt_read_gpa(vgpu,
				workload->ring_context_gpa +
				BCS_TILE_REGISTER_VAL_OFFSET,
				(void *)shadow_ring_context +
				BCS_TILE_REGISTER_VAL_OFFSET, 4);
#undef COPY_REG
#undef COPY_REG_MASKED

	/* don't copy Ring Context (the first 0x50 dwords),
	 * only copy the Engine Context part from guest
	 */
	intel_gvt_read_gpa(vgpu,
			workload->ring_context_gpa +
			RING_CTX_SIZE,
			(void *)shadow_ring_context +
			RING_CTX_SIZE,
			I915_GTT_PAGE_SIZE - RING_CTX_SIZE);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);

	gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
		      workload->engine->name,
		      workload->ctx_desc.lrca,
		      workload->ctx_desc.context_id,
		      workload->ring_context_gpa);

	/* only need to ensure this context is not pinned/unpinned during the
	 * period from the last submission to this submission.
	 * Upon reaching this function, the currently submitted context is not
	 * supposed to get unpinned. If a misbehaving guest driver ever does
	 * this, it would corrupt itself.
	 */
	if (s->last_ctx[ring_id].valid &&
			(s->last_ctx[ring_id].lrca ==
					workload->ctx_desc.lrca) &&
			(s->last_ctx[ring_id].ring_context_gpa ==
					workload->ring_context_gpa))
		skip = true;

	s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
	s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;

	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
		return 0;

	s->last_ctx[ring_id].valid = false;
	context_page_num = workload->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
		context_page_num = 19;

	/* find consecutive GPAs from gma until the first non-consecutive GPA.
	 * read from the consecutive GPAs into dst virtual address
	 */
	gpa_size = 0;
	for (i = 2; i < context_page_num; i++) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		if (gpa_size == 0) {
			gpa_base = context_gpa;
			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
		} else if (context_gpa != gpa_base + gpa_size)
			goto read;

		gpa_size += I915_GTT_PAGE_SIZE;

		if (i == context_page_num - 1)
			goto read;

		continue;

read:
		intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
		gpa_base = context_gpa;
		gpa_size = I915_GTT_PAGE_SIZE;
		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
	}
	ret = intel_gvt_scan_engine_context(workload);
	if (ret) {
		gvt_vgpu_err("invalid cmd found in guest context pages\n");
		return ret;
	}
	s->last_ctx[ring_id].valid = true;
	return 0;
}

static inline bool is_gvt_request(struct i915_request *rq)
{
	return intel_context_force_single_submission(rq->context);
}

static void save_ring_hw_state(struct intel_vgpu *vgpu,
			       const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t reg;

	reg = RING_INSTDONE(engine->mmio_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
		intel_uncore_read(uncore, reg);

	reg = RING_ACTHD(engine->mmio_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
		intel_uncore_read(uncore, reg);

	reg = RING_ACTHD_UDW(engine->mmio_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
		intel_uncore_read(uncore, reg);
}

static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *rq = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[rq->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = rq->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(rq)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, rq->engine);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, rq->engine);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, rq->engine);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, rq->engine);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void
shadow_context_descriptor_update(struct intel_context *ce,
				 struct intel_vgpu_workload *workload)
{
	u64 desc = ce->lrc.desc;

	/*
	 * Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	desc |= (u64)workload->ctx_desc.addressing_mode <<
		GEN8_CTX_ADDRESSING_MODE_SHIFT;

	ce->lrc.desc = desc;
}

static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;
	int err;

	if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(req->context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/*
	 * To track whether a request has started on HW, we can emit a
	 * breadcrumb at the beginning of the request and check its
	 * timeline's HWSP to see if the breadcrumb has advanced past the
	 * start of this request. Actually, the request must have the
	 * init_breadcrumb if its timeline has has_init_breadcrumb set, or
	 * the scheduler might get a wrong state of it during reset. Since
	 * the requests from GVT always set the has_init_breadcrumb flag,
	 * we need to do the emit_init_breadcrumb for all the requests here.
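	 * (The breadcrumb is emitted before the guest ring contents are
	 * copied into the shadow ring buffer below.)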
	 */
	if (req->engine->emit_init_breadcrumb) {
		err = req->engine->emit_init_breadcrumb(req);
		if (err) {
			gvt_vgpu_err("fail to emit init breadcrumb\n");
			return err;
		}
	}

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);

	wa_ctx->indirect_ctx.obj = NULL;
	wa_ctx->indirect_ctx.shadow_va = NULL;
}

static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
{
	struct scatterlist *sg = pd->pt.base->mm.pages->sgl;

	/* This is not a good idea */
	sg->dma_address = addr;
}

static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
					  struct intel_context *ce)
{
	struct intel_vgpu_mm *mm = workload->shadow_mm;
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
	int i = 0;

	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
	} else {
		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);
			/* skip now as the current i915 ppgtt alloc won't
			 * allocate a top level pdp for a non 4-level table;
			 * this won't impact the shadow ppgtt.
			 */
			if (!pd)
				break;

			set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
		}
	}
}

static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq;

	if (workload->req)
		return 0;

	rq = i915_request_create(s->shadow[workload->engine->id]);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		return PTR_ERR(rq);
	}

	workload->req = i915_request_get(rq);
	return 0;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
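 * Shadowing is done at most once per workload; the workload->shadow flag
 * below guards against repeating it when a workload is re-dispatched.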
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int ret;

	lockdep_assert_held(&vgpu->vgpu_lock);

	if (workload->shadow)
		return 0;

	if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(s->shadow[workload->engine->id],
						 workload);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		return ret;

	if (workload->engine->id == RCS0 &&
	    workload->wa_ctx.indirect_ctx.size) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_shadow;
	}

	workload->shadow = true;
	return 0;

err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	struct i915_gem_ww_ctx ww;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		/* For a privileged batch buffer that is not wa_ctx,
		 * bb_start_cmd_va is only updated in ring_scan_buffer, not
		 * in the real ring address allocated by the later
		 * copy_workload_to_ring_buffer. Please note that
		 * shadow_ring_buffer_va points to the real ring buffer va
		 * in copy_workload_to_ring_buffer.
		 */

		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		/*
		 * For a non-priv bb, scan & shadow is only for
		 * debugging purposes, so the content of the shadow bb
		 * is the same as the original bb. Therefore,
		 * here, rather than switching to the shadow bb's gma
		 * address, we directly use the original batch buffer's
		 * gma address, and send the original bb to hardware
		 * directly.
		 */
		if (!bb->ppgtt) {
			i915_gem_ww_ctx_init(&ww, false);
retry:
			i915_gem_object_lock(bb->obj, &ww);

			bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
							      NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
				ret = PTR_ERR(bb->vma);
				if (ret == -EDEADLK) {
					ret = i915_gem_ww_ctx_backoff(&ww);
					if (!ret)
						goto retry;
				}
				goto err;
			}

			/* relocate shadow batch buffer */
			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req,
						      0);
			if (ret)
				goto err;

			/* No one is going to touch shadow bb from now on.
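			 * Flush the CPU map so the scan and relocation
			 * writes reach memory before the buffer executes.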
			 */
			i915_gem_object_flush_map(bb->obj);
			i915_gem_ww_ctx_fini(&ww);
		}
	}
	return 0;
err:
	i915_gem_ww_ctx_fini(&ww);
	release_shadow_batch_buffer(workload);
	return ret;
}

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;
	struct i915_gem_ww_ctx ww;
	int ret;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);

	vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
					  0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		if (ret == -EDEADLK) {
			ret = i915_gem_ww_ctx_backoff(&ww);
			if (!ret)
				goto retry;
		}
		return ret;
	}

	i915_gem_ww_ctx_fini(&ww);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
	vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
		workload->rb_start;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			i915_gem_object_lock(bb->obj, NULL);
			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma))
				i915_vma_unpin(bb->vma);

			i915_gem_object_unlock(bb->obj);
			i915_gem_object_put(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}
}

static int
intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_mm *m;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
	    !workload->shadow_mm->ppgtt_mm.shadowed) {
		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
		return -EINVAL;
	}

	if (!list_empty(&workload->lri_shadow_mm)) {
		list_for_each_entry(m, &workload->lri_shadow_mm,
				    ppgtt_mm.link) {
			ret = intel_vgpu_pin_mm(m);
			if (ret) {
				list_for_each_entry_from_reverse(m,
								 &workload->lri_shadow_mm,
								 ppgtt_mm.link)
					intel_vgpu_unpin_mm(m);
				gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
				break;
			}
		}
	}

	if (ret)
		intel_vgpu_unpin_mm(workload->shadow_mm);

	return ret;
}

static void
intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_mm *m;

	if (!list_empty(&workload->lri_shadow_mm)) {
		list_for_each_entry(m, &workload->lri_shadow_mm,
				    ppgtt_mm.link)
			intel_vgpu_unpin_mm(m);
	}
	intel_vgpu_unpin_mm(workload->shadow_mm);
}

static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	int ret = 0;

	ret = intel_vgpu_shadow_mm_pin(workload);
	if (ret) {
		gvt_vgpu_err("fail to pin shadow mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = copy_workload_to_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_shadow_mm_unpin(workload);
	return ret;
}

static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *rq;
	int ret;

	gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
		      workload->engine->name, workload);

	mutex_lock(&vgpu->vgpu_lock);

	ret = intel_gvt_workload_req_alloc(workload);
	if (ret)
		goto err_req;

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = populate_shadow_context(workload);
	if (ret) {
		release_shadow_wa_ctx(&workload->wa_ctx);
		goto out;
	}

	ret = prepare_workload(workload);
out:
	if (ret) {
		/* We might still need to add the request with a
		 * clean ctx to retire it properly.
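		 * (fetch_and_zero() clears workload->req, so the submit
		 * path below is skipped for a failed workload.)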
		 */
		rq = fetch_and_zero(&workload->req);
		i915_request_put(rq);
	}

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
			      workload->engine->name, workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
	}
err_req:
	if (ret)
		workload->status = ret;
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

static struct intel_vgpu_workload *
pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->sched_lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
		goto out;
	}

	if (!scheduler->current_vgpu->active ||
	    list_empty(workload_q_head(scheduler->current_vgpu, engine)))
		goto out;

	/*
	 * still have a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[engine->id]) {
		workload = scheduler->current_workload[engine->id];
		gvt_dbg_sched("ring %s still have current workload %p\n",
			      engine->name, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, schedule policy routines
	 * will wait until the current workload is finished when trying to
	 * schedule out a vgpu.
	 */
	scheduler->current_workload[engine->id] =
		list_first_entry(workload_q_head(scheduler->current_vgpu,
						 engine),
				 struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[engine->id];

	gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->sched_lock);
	return workload;
}

static void update_guest_pdps(struct intel_vgpu *vgpu,
			      u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
}

static __maybe_unused bool
check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
{
	if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;

		if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
			gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
			return false;
		}
		return true;
	} else {
		/* see comment in LRI handler in cmd_parser.c */
		gvt_dbg_mm("invalid shadow mm type\n");
		return false;
	}
}

static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct execlist_ring_context *shadow_ring_context;
	struct intel_context *ctx = workload->req->context;
	void *context_base;
	void *src;
	unsigned long context_gpa, context_page_num;
	unsigned long gpa_base; /* first gpa of consecutive GPAs */
	unsigned long gpa_size; /* size of consecutive GPAs */
	int i;
	u32 ring_base;
	u32 head, tail;
	u16 wrap_count;

	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	GEM_BUG_ON(!intel_context_is_pinned(ctx));

	head = workload->rb_head;
	tail = workload->rb_tail;
	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

	if (tail < head) {
		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
			wrap_count = 0;
		else
			wrap_count += 1;
	}

	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;

	ring_base = rq->engine->mmio_base;
	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
		context_page_num = 19;

	context_base = (void *) ctx->lrc_reg_state -
			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

	/* find consecutive GPAs from gma until the first non-consecutive GPA.
	 * write to the consecutive GPAs from src virtual address
	 */
	gpa_size = 0;
	for (i = 2; i < context_page_num; i++) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		if (gpa_size == 0) {
			gpa_base = context_gpa;
			src = context_base + (i << I915_GTT_PAGE_SHIFT);
		} else if (context_gpa != gpa_base + gpa_size)
			goto write;

		gpa_size += I915_GTT_PAGE_SIZE;

		if (i == context_page_num - 1)
			goto write;

		continue;

write:
		intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
		gpa_base = context_gpa;
		gpa_size = I915_GTT_PAGE_SIZE;
		src = context_base + (i << I915_GTT_PAGE_SHIFT);
	}

	intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	shadow_ring_context = (void *) ctx->lrc_reg_state;

	if (!list_empty(&workload->lri_shadow_mm)) {
		struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
							  struct intel_vgpu_mm,
							  ppgtt_mm.link);
		GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
		update_guest_pdps(vgpu, workload->ring_context_gpa,
				  (void *)m->ppgtt_mm.guest_pdps);
	}

#define COPY_REG(name) \
	intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
}

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	intel_engine_mask_t tmp;

	/* free the unsubmitted workloads in the queues.
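	 * The engine's current workload, if any, is completed and released
	 * separately by complete_current_workload(). Clearing the
	 * shadow_ctx_desc_updated bit forces the shadow context descriptor
	 * to be refreshed on the next submission.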
	 */
	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
					 &s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq = workload->req;
	int event;

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&gvt->sched_lock);

	/* For a workload with a request, we need to wait for the context
	 * switch to make sure the request is completed.
	 * For a workload without a request, directly complete the workload.
	 */
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that
		 * no context switch interrupt is triggered to the guest
		 * when this request caused a GPU hang.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		if (!workload->status &&
		    !(vgpu->resetting_eng & BIT(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}

		i915_request_put(fetch_and_zero(&workload->req));
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
		      ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
		/* If workload->status is not successful, it means the GPU
		 * hung or something went wrong with i915/GVT, and GVT won't
		 * inject a context switch interrupt to the guest. So this
		 * error is actually a vGPU hang from the guest's point of
		 * view, and accordingly we should emulate a vGPU hang. If
		 * there are pending workloads which were already submitted
		 * from the guest, we should clean them up like the HW GPU
		 * does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up during the resetting process later, so doing
		 * the workload clean up here doesn't have any impact.
		 */
		intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
	}

	workload->complete(workload);

	intel_vgpu_shadow_mm_unpin(workload);
	intel_vgpu_destroy_workload(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}

static int workload_thread(void *arg)
{
	struct intel_engine_cs *engine = arg;
	const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9;
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	gvt_dbg_core("workload thread for ring %s started\n", engine->name);

	while (!kthread_should_stop()) {
		intel_wakeref_t wakeref;

		add_wait_queue(&scheduler->waitq[engine->id], &wait);
		do {
			workload = pick_next_workload(gvt, engine);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[engine->id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
			      engine->name, workload,
			      workload->vgpu->id);

		wakeref = intel_runtime_pm_get(engine->uncore->rpm);

		gvt_dbg_sched("ring %s will dispatch workload %p\n",
			      engine->name, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(engine->uncore,
						   FORCEWAKE_ALL);
		/*
		 * Update the vReg of the vGPU which submitted this
		 * workload. The vGPU may use these registers for checking
		 * the context state. The value comes from GPU commands
		 * in this workload.
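		 * (Currently only RING_START is refreshed from the
		 * workload; see update_vreg_in_ctx().)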
		 */
		update_vreg_in_ctx(workload);

		ret = dispatch_workload(workload);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring %s wait workload %p\n",
			      engine->name, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
			      workload, workload->status);

		complete_current_workload(gvt, engine->id);

		if (need_force_wake)
			intel_uncore_forcewake_put(engine->uncore,
						   FORCEWAKE_ALL);

		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->gt, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->gt, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		scheduler->thread[i] = kthread_run(workload_thread, engine,
						   "gvt:%s", engine->name);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					       &gvt->shadow_ctx_notifier_block[i]);
	}

	return 0;

err:
	intel_gvt_clean_workload_scheduler(gvt);
	return ret;
}

static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
				struct i915_ppgtt *ppgtt)
{
	int i;

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		set_dma_address(ppgtt->pd, s->i915_context_pml4);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			set_dma_address(pd, s->i915_context_pdps[i]);
		}
	}
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
	for_each_engine(engine, vgpu->gvt->gt, id)
		intel_context_put(s->shadow[id]);

	kmem_cache_destroy(s->workloads);
}


/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	intel_vgpu_clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
			     struct i915_ppgtt *ppgtt)
{
	int i;

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		s->i915_context_pml4 = px_dma(ppgtt->pd);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			struct i915_page_directory * const pd =
				i915_pd_entry(ppgtt->pd, i);

			s->i915_context_pdps[i] = px_dma(pd);
		}
	}
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	struct i915_ppgtt *ppgtt;
	enum intel_engine_id i;
	int ret;

	ppgtt = i915_ppgtt_create(to_gt(i915), I915_BO_ALLOC_PM_EARLY);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	i915_context_ppgtt_root_save(s, ppgtt);

	for_each_engine(engine, vgpu->gvt->gt, i) {
		struct intel_context *ce;

		INIT_LIST_HEAD(&s->workload_q_head[i]);
		s->shadow[i] = ERR_PTR(-EINVAL);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto out_shadow_ctx;
		}

		i915_vm_put(ce->vm);
		ce->vm = i915_vm_get(&ppgtt->vm);
		intel_context_set_single_submission(ce);

		/* Max ring buffer size */
		if (!intel_uc_wants_guc_submission(&engine->gt->uc))
			ce->ring_size = SZ_2M;

		s->shadow[i] = ce;
	}

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	memset(s->last_ctx, 0, sizeof(s->last_ctx));

	i915_vm_put(&ppgtt->vm);
	return 0;

out_shadow_ctx:
	i915_context_ppgtt_root_restore(s, ppgtt);
	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (IS_ERR(s->shadow[i]))
			break;

		intel_context_put(s->shadow[i]);
	}
	i915_vm_put(&ppgtt->vm);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (drm_WARN_ON(&i915->drm,
			interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
		     vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	intel_context_unpin(s->shadow[workload->engine->id]);
	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (!list_empty(&workload->lri_shadow_mm)) {
		struct intel_vgpu_mm *m, *mm;

		list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
					 ppgtt_mm.link) {
			list_del(&m->ppgtt_mm.link);
			intel_vgpu_mm_put(m);
		}
	}

	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);
	INIT_LIST_HEAD(&workload->lri_shadow_mm);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	enum intel_gvt_gtt_type root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];

	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @engine: the engine
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, engine);
	struct intel_vgpu_workload *last_workload = NULL;
	struct intel_vgpu_workload *workload = NULL;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	u32 guest_head;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(ring_tail.val), &tail, 4);

	guest_head = head;

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	list_for_each_entry_reverse(last_workload, q, list) {

		if (same_context(&last_workload->ctx_desc, desc)) {
			gvt_dbg_el("ring %s cur workload == last\n",
				   engine->name);
			gvt_dbg_el("ctx head %x real head %lx\n", head,
				   last_workload->rb_tail);
			/*
			 * cannot use guest context head pointer here,
			 * as it might not be updated at this time
			 */
			head = last_workload->rb_tail;
			break;
		}
	}

	gvt_dbg_el("ring %s begin a new workload\n", engine->name);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_read_gpa(vgpu, ring_context_gpa +
		RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	if (!intel_gvt_ggtt_validate_range(vgpu, start,
				_RING_CTL_BUF_SIZE(ctl))) {
		gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
		return ERR_PTR(-EINVAL);
	}

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->engine = engine;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->guest_rb_head = guest_head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (engine->id == RCS0) {
		intel_gvt_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;

		if (workload->wa_ctx.indirect_ctx.size != 0) {
			if (!intel_gvt_ggtt_validate_range(vgpu,
				workload->wa_ctx.indirect_ctx.guest_gma,
				workload->wa_ctx.indirect_ctx.size)) {
				gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
					     workload->wa_ctx.indirect_ctx.guest_gma);
				kmem_cache_free(s->workloads, workload);
				return ERR_PTR(-EINVAL);
			}
		}

		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
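		/* bit 0 of the guest's bb_per_ctx_ptr marks the per-ctx
		 * batch buffer as valid; validate its GGTT range before use.
		 */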
		if (workload->wa_ctx.per_ctx.valid) {
			if (!intel_gvt_ggtt_validate_range(vgpu,
				workload->wa_ctx.per_ctx.guest_gma,
				CACHELINE_BYTES)) {
				gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
					     workload->wa_ctx.per_ctx.guest_gma);
				kmem_cache_free(s->workloads, workload);
				return ERR_PTR(-EINVAL);
			}
		}
	}

	gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
		   workload, engine->name, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(q)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
			ret = intel_gvt_scan_and_shadow_workload(workload);
	}

	if (ret) {
		if (vgpu_is_vm_unhealthy(ret))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	ret = intel_context_pin(s->shadow[engine->id]);
	if (ret) {
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		      workload_q_head(workload->vgpu, workload->engine));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}