1 /* 2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 * SOFTWARE. 22 * 23 * Authors: 24 * Eddie Dong <eddie.dong@intel.com> 25 * Kevin Tian <kevin.tian@intel.com> 26 * 27 * Contributors: 28 * Zhi Wang <zhi.a.wang@intel.com> 29 * Changbin Du <changbin.du@intel.com> 30 * Zhenyu Wang <zhenyuw@linux.intel.com> 31 * Tina Zhang <tina.zhang@intel.com> 32 * Bing Niu <bing.niu@intel.com> 33 * 34 */ 35 36 #include "i915_drv.h" 37 #include "gt/intel_context.h" 38 #include "gt/intel_gpu_commands.h" 39 #include "gt/intel_ring.h" 40 #include "gvt.h" 41 #include "trace.h" 42 43 #define GEN9_MOCS_SIZE 64 44 45 /* Raw offset is appened to each line for convenience. */ 46 static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { 47 {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ 48 {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ 49 {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ 50 {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ 51 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ 52 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ 53 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ 54 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ 55 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ 56 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ 57 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ 58 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ 59 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ 60 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ 61 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ 62 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ 63 {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ 64 {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ 65 {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ 66 {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ 67 {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ 68 {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ 69 70 {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ 71 {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ 72 {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ 73 {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ 74 {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */ 75 {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ 76 }; 77 78 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { 79 {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ 80 {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ 81 {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ 82 {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ 83 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ 84 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ 85 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ 86 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ 87 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ 88 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ 89 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ 90 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ 91 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ 92 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ 93 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ 94 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ 95 {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ 96 {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ 97 {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ 98 {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ 99 {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ 100 {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ 101 102 {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ 103 {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ 104 {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ 105 {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ 106 {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ 107 {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ 108 {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ 109 {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ 110 {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ 111 {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ 112 {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ 113 {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ 114 {RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */ 115 {RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */ 116 {RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */ 117 {RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */ 118 {RCS0, TRVADR, 0, true}, /* 0x4df0 */ 119 {RCS0, TRTTE, 0, true}, /* 0x4df4 */ 120 {RCS0, _MMIO(0x4dfc), 0, true}, 121 122 {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ 123 {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ 124 {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ 125 {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ 126 {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */ 127 128 {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ 129 130 {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ 131 132 {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ 133 {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ 134 {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ 135 {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ 136 137 {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ 138 {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ 139 {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */ 140 141 {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ 142 {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ 143 {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ 144 {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ 145 }; 146 147 static struct { 148 bool initialized; 149 u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE]; 150 u32 l3cc_table[GEN9_MOCS_SIZE / 2]; 151 } gen9_render_mocs; 152 153 static u32 gen9_mocs_mmio_offset_list[] = { 154 [RCS0] = 0xc800, 155 [VCS0] = 0xc900, 156 [VCS1] = 0xca00, 157 [BCS0] = 0xcc00, 158 [VECS0] = 0xcb00, 159 }; 160 161 static void load_render_mocs(const struct intel_engine_cs *engine) 162 { 163 struct intel_gvt *gvt = engine->i915->gvt; 164 struct intel_uncore *uncore = engine->uncore; 165 u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; 166 u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; 167 i915_reg_t offset; 168 int ring_id, i; 169 170 /* Platform doesn't have mocs mmios. */ 171 if (!regs) 172 return; 173 174 for (ring_id = 0; ring_id < cnt; ring_id++) { 175 if (!HAS_ENGINE(engine->gt, ring_id)) 176 continue; 177 178 offset.reg = regs[ring_id]; 179 for (i = 0; i < GEN9_MOCS_SIZE; i++) { 180 gen9_render_mocs.control_table[ring_id][i] = 181 intel_uncore_read_fw(uncore, offset); 182 offset.reg += 4; 183 } 184 } 185 186 offset.reg = 0xb020; 187 for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { 188 gen9_render_mocs.l3cc_table[i] = 189 intel_uncore_read_fw(uncore, offset); 190 offset.reg += 4; 191 } 192 gen9_render_mocs.initialized = true; 193 } 194 195 static int 196 restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu, 197 struct i915_request *req) 198 { 199 u32 *cs; 200 int ret; 201 struct engine_mmio *mmio; 202 struct intel_gvt *gvt = vgpu->gvt; 203 int ring_id = req->engine->id; 204 int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id]; 205 206 if (count == 0) 207 return 0; 208 209 ret = req->engine->emit_flush(req, EMIT_BARRIER); 210 if (ret) 211 return ret; 212 213 cs = intel_ring_begin(req, count * 2 + 2); 214 if (IS_ERR(cs)) 215 return PTR_ERR(cs); 216 217 *cs++ = MI_LOAD_REGISTER_IMM(count); 218 for (mmio = gvt->engine_mmio_list.mmio; 219 i915_mmio_reg_valid(mmio->reg); mmio++) { 220 if (mmio->id != ring_id || !mmio->in_context) 221 continue; 222 223 *cs++ = i915_mmio_reg_offset(mmio->reg); 224 *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16); 225 gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", 226 *(cs-2), *(cs-1), vgpu->id, ring_id); 227 } 228 229 *cs++ = MI_NOOP; 230 intel_ring_advance(req, cs); 231 232 ret = req->engine->emit_flush(req, EMIT_BARRIER); 233 if (ret) 234 return ret; 235 236 return 0; 237 } 238 239 static int 240 restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu, 241 struct i915_request *req) 242 { 243 unsigned int index; 244 u32 *cs; 245 246 cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2); 247 if (IS_ERR(cs)) 248 return PTR_ERR(cs); 249 250 *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE); 251 252 for (index = 0; index < GEN9_MOCS_SIZE; index++) { 253 *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index)); 254 *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index)); 255 gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", 256 *(cs-2), *(cs-1), vgpu->id, req->engine->id); 257 258 } 259 260 *cs++ = MI_NOOP; 261 intel_ring_advance(req, cs); 262 263 return 0; 264 } 265 266 static int 267 restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu, 268 struct i915_request *req) 269 { 270 unsigned int index; 271 u32 *cs; 272 273 cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2); 274 if (IS_ERR(cs)) 275 return PTR_ERR(cs); 276 277 *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2); 278 279 for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) { 280 *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index)); 281 *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index)); 282 gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n", 283 *(cs-2), *(cs-1), vgpu->id, req->engine->id); 284 285 } 286 287 *cs++ = MI_NOOP; 288 intel_ring_advance(req, cs); 289 290 return 0; 291 } 292 293 /* 294 * Use lri command to initialize the mmio which is in context state image for 295 * inhibit context, it contains tracked engine mmio, render_mocs and 296 * render_mocs_l3cc. 297 */ 298 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, 299 struct i915_request *req) 300 { 301 int ret; 302 u32 *cs; 303 304 cs = intel_ring_begin(req, 2); 305 if (IS_ERR(cs)) 306 return PTR_ERR(cs); 307 308 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 309 *cs++ = MI_NOOP; 310 intel_ring_advance(req, cs); 311 312 ret = restore_context_mmio_for_inhibit(vgpu, req); 313 if (ret) 314 goto out; 315 316 /* no MOCS register in context except render engine */ 317 if (req->engine->id != RCS0) 318 goto out; 319 320 ret = restore_render_mocs_control_for_inhibit(vgpu, req); 321 if (ret) 322 goto out; 323 324 ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req); 325 if (ret) 326 goto out; 327 328 out: 329 cs = intel_ring_begin(req, 2); 330 if (IS_ERR(cs)) 331 return PTR_ERR(cs); 332 333 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 334 *cs++ = MI_NOOP; 335 intel_ring_advance(req, cs); 336 337 return ret; 338 } 339 340 static u32 gen8_tlb_mmio_offset_list[] = { 341 [RCS0] = 0x4260, 342 [VCS0] = 0x4264, 343 [VCS1] = 0x4268, 344 [BCS0] = 0x426c, 345 [VECS0] = 0x4270, 346 }; 347 348 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, 349 const struct intel_engine_cs *engine) 350 { 351 struct intel_uncore *uncore = engine->uncore; 352 struct intel_vgpu_submission *s = &vgpu->submission; 353 u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; 354 u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; 355 enum forcewake_domains fw; 356 i915_reg_t reg; 357 358 if (!regs) 359 return; 360 361 if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt)) 362 return; 363 364 if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending)) 365 return; 366 367 reg = _MMIO(regs[engine->id]); 368 369 /* WaForceWakeRenderDuringMmioTLBInvalidate:skl 370 * we need to put a forcewake when invalidating RCS TLB caches, 371 * otherwise device can go to RC6 state and interrupt invalidation 372 * process 373 */ 374 fw = intel_uncore_forcewake_for_reg(uncore, reg, 375 FW_REG_READ | FW_REG_WRITE); 376 if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9) 377 fw |= FORCEWAKE_RENDER; 378 379 intel_uncore_forcewake_get(uncore, fw); 380 381 intel_uncore_write_fw(uncore, reg, 0x1); 382 383 if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50)) 384 gvt_vgpu_err("timeout in invalidate ring %s tlb\n", 385 engine->name); 386 else 387 vgpu_vreg_t(vgpu, reg) = 0; 388 389 intel_uncore_forcewake_put(uncore, fw); 390 391 gvt_dbg_core("invalidate TLB for ring %s\n", engine->name); 392 } 393 394 static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, 395 const struct intel_engine_cs *engine) 396 { 397 u32 regs[] = { 398 [RCS0] = 0xc800, 399 [VCS0] = 0xc900, 400 [VCS1] = 0xca00, 401 [BCS0] = 0xcc00, 402 [VECS0] = 0xcb00, 403 }; 404 struct intel_uncore *uncore = engine->uncore; 405 i915_reg_t offset, l3_offset; 406 u32 old_v, new_v; 407 int i; 408 409 if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs))) 410 return; 411 412 if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9) 413 return; 414 415 if (!pre && !gen9_render_mocs.initialized) 416 load_render_mocs(engine); 417 418 offset.reg = regs[engine->id]; 419 for (i = 0; i < GEN9_MOCS_SIZE; i++) { 420 if (pre) 421 old_v = vgpu_vreg_t(pre, offset); 422 else 423 old_v = gen9_render_mocs.control_table[engine->id][i]; 424 if (next) 425 new_v = vgpu_vreg_t(next, offset); 426 else 427 new_v = gen9_render_mocs.control_table[engine->id][i]; 428 429 if (old_v != new_v) 430 intel_uncore_write_fw(uncore, offset, new_v); 431 432 offset.reg += 4; 433 } 434 435 if (engine->id == RCS0) { 436 l3_offset.reg = 0xb020; 437 for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { 438 if (pre) 439 old_v = vgpu_vreg_t(pre, l3_offset); 440 else 441 old_v = gen9_render_mocs.l3cc_table[i]; 442 if (next) 443 new_v = vgpu_vreg_t(next, l3_offset); 444 else 445 new_v = gen9_render_mocs.l3cc_table[i]; 446 447 if (old_v != new_v) 448 intel_uncore_write_fw(uncore, l3_offset, new_v); 449 450 l3_offset.reg += 4; 451 } 452 } 453 } 454 455 #define CTX_CONTEXT_CONTROL_VAL 0x03 456 457 bool is_inhibit_context(struct intel_context *ce) 458 { 459 const u32 *reg_state = ce->lrc_reg_state; 460 u32 inhibit_mask = 461 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 462 463 return inhibit_mask == 464 (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask); 465 } 466 467 /* Switch ring mmio values (context). */ 468 static void switch_mmio(struct intel_vgpu *pre, 469 struct intel_vgpu *next, 470 const struct intel_engine_cs *engine) 471 { 472 struct intel_uncore *uncore = engine->uncore; 473 struct intel_vgpu_submission *s; 474 struct engine_mmio *mmio; 475 u32 old_v, new_v; 476 477 if (GRAPHICS_VER(engine->i915) >= 9) 478 switch_mocs(pre, next, engine); 479 480 for (mmio = engine->i915->gvt->engine_mmio_list.mmio; 481 i915_mmio_reg_valid(mmio->reg); mmio++) { 482 if (mmio->id != engine->id) 483 continue; 484 /* 485 * No need to do save or restore of the mmio which is in context 486 * state image on gen9, it's initialized by lri command and 487 * save or restore with context together. 488 */ 489 if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context) 490 continue; 491 492 // save 493 if (pre) { 494 vgpu_vreg_t(pre, mmio->reg) = 495 intel_uncore_read_fw(uncore, mmio->reg); 496 if (mmio->mask) 497 vgpu_vreg_t(pre, mmio->reg) &= 498 ~(mmio->mask << 16); 499 old_v = vgpu_vreg_t(pre, mmio->reg); 500 } else { 501 old_v = mmio->value = 502 intel_uncore_read_fw(uncore, mmio->reg); 503 } 504 505 // restore 506 if (next) { 507 s = &next->submission; 508 /* 509 * No need to restore the mmio which is in context state 510 * image if it's not inhibit context, it will restore 511 * itself. 512 */ 513 if (mmio->in_context && 514 !is_inhibit_context(s->shadow[engine->id])) 515 continue; 516 517 if (mmio->mask) 518 new_v = vgpu_vreg_t(next, mmio->reg) | 519 (mmio->mask << 16); 520 else 521 new_v = vgpu_vreg_t(next, mmio->reg); 522 } else { 523 if (mmio->in_context) 524 continue; 525 if (mmio->mask) 526 new_v = mmio->value | (mmio->mask << 16); 527 else 528 new_v = mmio->value; 529 } 530 531 intel_uncore_write_fw(uncore, mmio->reg, new_v); 532 533 trace_render_mmio(pre ? pre->id : 0, 534 next ? next->id : 0, 535 "switch", 536 i915_mmio_reg_offset(mmio->reg), 537 old_v, new_v); 538 } 539 540 if (next) 541 handle_tlb_pending_event(next, engine); 542 } 543 544 /** 545 * intel_gvt_switch_render_mmio - switch mmio context of specific engine 546 * @pre: the last vGPU that own the engine 547 * @next: the vGPU to switch to 548 * @engine: the engine 549 * 550 * If pre is null indicates that host own the engine. If next is null 551 * indicates that we are switching to host workload. 552 */ 553 void intel_gvt_switch_mmio(struct intel_vgpu *pre, 554 struct intel_vgpu *next, 555 const struct intel_engine_cs *engine) 556 { 557 if (WARN(!pre && !next, "switch ring %s from host to HOST\n", 558 engine->name)) 559 return; 560 561 gvt_dbg_render("switch ring %s from %s to %s\n", engine->name, 562 pre ? "vGPU" : "host", next ? "vGPU" : "HOST"); 563 564 /** 565 * We are using raw mmio access wrapper to improve the 566 * performace for batch mmio read/write, so we need 567 * handle forcewake mannually. 568 */ 569 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); 570 switch_mmio(pre, next, engine); 571 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); 572 } 573 574 /** 575 * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list 576 * @gvt: GVT device 577 * 578 */ 579 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) 580 { 581 struct engine_mmio *mmio; 582 583 if (GRAPHICS_VER(gvt->gt->i915) >= 9) { 584 gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; 585 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; 586 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); 587 gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list; 588 gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list); 589 } else { 590 gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; 591 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; 592 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); 593 } 594 595 for (mmio = gvt->engine_mmio_list.mmio; 596 i915_mmio_reg_valid(mmio->reg); mmio++) { 597 if (mmio->in_context) { 598 gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; 599 intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg); 600 } 601 } 602 } 603