/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

#define GEN9_MOCS_SIZE		64

/* Raw offset is appended to each line for convenience. */
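/*
 * Each entry below is (engine, register, mask, in_context).  The mask gives
 * the bits that matter when saving/restoring the register; for the
 * hardware's "masked" registers the same mask is shifted into the upper
 * 16 bits on writes so that only those bits take effect (see switch_mmio()
 * below).  in_context marks registers that live in the logical ring context
 * image and are therefore saved/restored together with the context.
 */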
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
	{RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
	{RCS0, TRVADR, 0, false}, /* 0x4df0 */
	{RCS0, TRTTE, 0, false}, /* 0x4df4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
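/*
 * Snapshot of the host's MOCS programming, captured lazily the first time
 * an engine is switched away from the host (see load_render_mocs()).  It
 * provides the "host side" values when switching MOCS between a vGPU and
 * the host in switch_mocs().
 */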
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

static void load_render_mocs(struct drm_i915_private *dev_priv)
{
	i915_reg_t offset;
	u32 regs[] = {
		[RCS0] = 0xc800,
		[VCS0] = 0xc900,
		[VCS1] = 0xca00,
		[BCS0] = 0xcc00,
		[VECS0] = 0xcb00,
	};
	int ring_id, i;

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		if (!HAS_ENGINE(dev_priv, ring_id))
			continue;
		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	/* One dword for the LRI header, two per register/value pair,
	 * plus one MI_NOOP to keep the command stream qword-aligned.
	 */
	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id ||
		    !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
			(mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
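/*
 * The two helpers below replay the vGPU's shadow MOCS programming into an
 * inhibit context with one MI_LOAD_REGISTER_IMM each: first the per-engine
 * control table (GEN9_GFX_MOCS), then the L3 control table
 * (GEN9_LNCFCMOCS), which holds two entries per register.
 */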
static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/*
 * Use the LRI command to initialize the MMIO registers that live in the
 * context state image of an inhibit context.  This covers the tracked
 * engine MMIOs, the render MOCS control table and the render MOCS l3cc
 * table.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Disable arbitration while the register loads execute. */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS register in context except render engine */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	/* Re-enable arbitration whether or not the restore succeeded. */
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {
		[RCS0] = 0x4260,
		[VCS0] = 0x4264,
		[VCS1] = 0x4268,
		[BCS0] = 0x426c,
		[VECS0] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake reference while invalidating the RCS
	 * TLB, otherwise the device can drop into RC6 and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
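/*
 * Switch the MOCS programming between two owners of an engine.  A NULL
 * vgpu on either side stands for the host, whose values come from the
 * gen9_render_mocs snapshot.  On Kabylake, Broxton and Coffeelake the
 * render engine's MOCS registers are part of the context image, so the
 * RCS0 switch is skipped on those platforms.
 */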
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;

	u32 regs[] = {
		[RCS0] = 0xc800,
		[VCS0] = 0xc900,
		[VCS1] = 0xca00,
		[BCS0] = 0xcc00,
		[VECS0] = 0xcb00,
	};
	int i;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (ring_id == RCS0 &&
	    (IS_KABYLAKE(dev_priv) ||
	     IS_BROXTON(dev_priv) ||
	     IS_COFFEELAKE(dev_priv)))
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[ring_id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[ring_id][i];

		if (old_v != new_v)
			I915_WRITE_FW(offset, new_v);

		offset.reg += 4;
	}

	if (ring_id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				I915_WRITE_FW(l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}
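/*
 * Note on inhibit contexts: a context whose CONTEXT_CONTROL has
 * CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT set skips the hardware restore of
 * its register state image.  switch_mmio() below relies on this: for such
 * contexts the in-context registers are written directly through MMIO,
 * while for ordinary contexts the context restore reloads them.
 */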
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (INTEL_GEN(dev_priv) >= 9)
		switch_mocs(pre, next, ring_id);

	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/*
		 * No need to save or restore the MMIOs that are part of the
		 * context state image on Kabylake, Broxton and Coffeelake:
		 * they are initialized by the LRI command and saved/restored
		 * together with the context.
		 */
		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
			|| IS_COFFEELAKE(dev_priv)) && mmio->in_context)
			continue;

		/* save */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else
			old_v = mmio->value = I915_READ_FW(mmio->reg);

		/* restore */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore an MMIO that is part of the
			 * context state image if this is not an inhibit
			 * context: the context restore will reload it.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
								     dev_priv->engine[ring_id])))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		I915_WRITE_FW(mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, ring_id);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of specific engine
 * @pre: the last vGPU that owns the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * A NULL @pre indicates that the host owns the engine; a NULL @next
 * indicates that we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

	/*
	 * We are using the raw MMIO access wrappers to improve the
	 * performance of batched MMIO reads/writes, so we need to
	 * handle forcewake manually.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, ring_id);
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 *
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (INTEL_GEN(gvt->dev_priv) >= 9)
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
	else
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
		}
	}
}