/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
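
/*
 * Illustrative sketch (not part of the driver): a caller that knows a guest
 * GGTT index and wants to reach the corresponding entry in the physical GGTT
 * would typically translate the index first and bail out on a ballooning
 * violation, roughly:
 *
 *	unsigned long h_index;
 *
 *	if (intel_gvt_ggtt_index_g2h(vgpu, g_index, &h_index))
 *		return -EACCES;
 *	// h_index can now be used against the physical GGTT
 */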

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when the PSE bit is set
 *
 * If the given type doesn't carry such information (e.g. an L4 root entry
 * has no PSE bit, and a PTE page table has no next level page table),
 * GTT_TYPE_INVALID is returned. This is useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}
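
/*
 * Worked example (illustrative, not used by the code): for a 2M entry whose
 * val64 address field is 0x40200000, gen8_gtt_get_pfn() masks with
 * ADDR_2M_MASK and shifts right by 12, returning 0x40200; gen8_gtt_set_pfn()
 * below writes a pfn back at the same bit position, so callers always
 * exchange 4K-granular pfns regardless of the entry size.
 */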

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}

	e->val64 |= (pfn << 12);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & BIT(7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes the PDP root pointer registers without the present bit
	 * set, which still works, so root pointer entries need to be treated
	 * specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & BIT(0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~BIT(0);
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= BIT(0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
	static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
	{ \
		unsigned long x = (exp); \
		trace_gma_index(__func__, gma, x); \
		return x; \
	}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}
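
/*
 * Note (illustrative summary): gtt_entry_p2m() is the gfn to mfn step of
 * shadowing. Given a present guest entry 'p' whose pfn is a guest page frame
 * number, the returned 'm' is identical except that its pfn field holds the
 * host machine frame number reported by the hypervisor, ready to be written
 * into a shadow page table.
 */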
475 */ 476 int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm, 477 void *page_table, struct intel_gvt_gtt_entry *e, 478 unsigned long index) 479 { 480 struct intel_gvt *gvt = mm->vgpu->gvt; 481 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 482 int ret; 483 484 e->type = mm->page_table_entry_type; 485 486 ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu); 487 if (ret) 488 return ret; 489 490 ops->test_pse(e); 491 return 0; 492 } 493 494 int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm, 495 void *page_table, struct intel_gvt_gtt_entry *e, 496 unsigned long index) 497 { 498 struct intel_gvt *gvt = mm->vgpu->gvt; 499 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 500 501 return ops->set_entry(page_table, e, index, false, 0, mm->vgpu); 502 } 503 504 /* 505 * PPGTT shadow page table helpers. 506 */ 507 static inline int ppgtt_spt_get_entry( 508 struct intel_vgpu_ppgtt_spt *spt, 509 void *page_table, int type, 510 struct intel_gvt_gtt_entry *e, unsigned long index, 511 bool guest) 512 { 513 struct intel_gvt *gvt = spt->vgpu->gvt; 514 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 515 int ret; 516 517 e->type = get_entry_type(type); 518 519 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 520 return -EINVAL; 521 522 ret = ops->get_entry(page_table, e, index, guest, 523 spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT, 524 spt->vgpu); 525 if (ret) 526 return ret; 527 528 ops->test_pse(e); 529 return 0; 530 } 531 532 static inline int ppgtt_spt_set_entry( 533 struct intel_vgpu_ppgtt_spt *spt, 534 void *page_table, int type, 535 struct intel_gvt_gtt_entry *e, unsigned long index, 536 bool guest) 537 { 538 struct intel_gvt *gvt = spt->vgpu->gvt; 539 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 540 541 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 542 return -EINVAL; 543 544 return ops->set_entry(page_table, e, index, guest, 545 spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT, 546 spt->vgpu); 547 } 548 549 #define ppgtt_get_guest_entry(spt, e, index) \ 550 ppgtt_spt_get_entry(spt, NULL, \ 551 spt->guest_page_type, e, index, true) 552 553 #define ppgtt_set_guest_entry(spt, e, index) \ 554 ppgtt_spt_set_entry(spt, NULL, \ 555 spt->guest_page_type, e, index, true) 556 557 #define ppgtt_get_shadow_entry(spt, e, index) \ 558 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ 559 spt->shadow_page.type, e, index, false) 560 561 #define ppgtt_set_shadow_entry(spt, e, index) \ 562 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ 563 spt->shadow_page.type, e, index, false) 564 565 /** 566 * intel_vgpu_init_page_track - init a page track data structure 567 * @vgpu: a vGPU 568 * @t: a page track data structure 569 * @gfn: guest memory page frame number 570 * @handler: the function will be called when target guest memory page has 571 * been modified. 572 * 573 * This function is called when a user wants to prepare a page track data 574 * structure to track a guest memory page. 575 * 576 * Returns: 577 * Zero on success, negative error code if failed. 
578 */ 579 int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, 580 struct intel_vgpu_page_track *t, 581 unsigned long gfn, 582 int (*handler)(void *, u64, void *, int), 583 void *data) 584 { 585 INIT_HLIST_NODE(&t->node); 586 587 t->tracked = false; 588 t->gfn = gfn; 589 t->handler = handler; 590 t->data = data; 591 592 hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn); 593 return 0; 594 } 595 596 /** 597 * intel_vgpu_clean_page_track - release a page track data structure 598 * @vgpu: a vGPU 599 * @t: a page track data structure 600 * 601 * This function is called before a user frees a page track data structure. 602 */ 603 void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, 604 struct intel_vgpu_page_track *t) 605 { 606 if (!hlist_unhashed(&t->node)) 607 hash_del(&t->node); 608 609 if (t->tracked) 610 intel_gvt_hypervisor_disable_page_track(vgpu, t); 611 } 612 613 /** 614 * intel_vgpu_find_tracked_page - find a tracked guest page 615 * @vgpu: a vGPU 616 * @gfn: guest memory page frame number 617 * 618 * This function is called when the emulation layer wants to figure out if a 619 * trapped GFN is a tracked guest page. 620 * 621 * Returns: 622 * Pointer to page track data structure, NULL if not found. 623 */ 624 struct intel_vgpu_page_track *intel_vgpu_find_tracked_page( 625 struct intel_vgpu *vgpu, unsigned long gfn) 626 { 627 struct intel_vgpu_page_track *t; 628 629 hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table, 630 t, node, gfn) { 631 if (t->gfn == gfn) 632 return t; 633 } 634 return NULL; 635 } 636 637 static int init_guest_page(struct intel_vgpu *vgpu, 638 struct intel_vgpu_guest_page *p, 639 unsigned long gfn, 640 int (*handler)(void *, u64, void *, int), 641 void *data) 642 { 643 p->oos_page = NULL; 644 p->write_cnt = 0; 645 646 return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data); 647 } 648 649 static int detach_oos_page(struct intel_vgpu *vgpu, 650 struct intel_vgpu_oos_page *oos_page); 651 652 static void clean_guest_page(struct intel_vgpu *vgpu, 653 struct intel_vgpu_guest_page *p) 654 { 655 if (p->oos_page) 656 detach_oos_page(vgpu, p->oos_page); 657 658 intel_vgpu_clean_page_track(vgpu, &p->track); 659 } 660 661 static inline int init_shadow_page(struct intel_vgpu *vgpu, 662 struct intel_vgpu_shadow_page *p, int type, bool hash) 663 { 664 struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 665 dma_addr_t daddr; 666 667 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 668 if (dma_mapping_error(kdev, daddr)) { 669 gvt_vgpu_err("fail to map dma addr\n"); 670 return -EINVAL; 671 } 672 673 p->vaddr = page_address(p->page); 674 p->type = type; 675 676 INIT_HLIST_NODE(&p->node); 677 678 p->mfn = daddr >> I915_GTT_PAGE_SHIFT; 679 if (hash) 680 hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); 681 return 0; 682 } 683 684 static inline void clean_shadow_page(struct intel_vgpu *vgpu, 685 struct intel_vgpu_shadow_page *p) 686 { 687 struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 688 689 dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096, 690 PCI_DMA_BIDIRECTIONAL); 691 692 if (!hlist_unhashed(&p->node)) 693 hash_del(&p->node); 694 } 695 696 static inline struct intel_vgpu_shadow_page *find_shadow_page( 697 struct intel_vgpu *vgpu, unsigned long mfn) 698 { 699 struct intel_vgpu_shadow_page *p; 700 701 hash_for_each_possible(vgpu->gtt.shadow_page_hash_table, 702 p, node, mfn) { 703 if (p->mfn == mfn) 704 return p; 705 } 706 return NULL; 707 } 708 709 #define 

#define page_track_to_guest_page(ptr) \
	container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!t->tracked)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(p,
			pa, p_data, bytes);
	if (ret)
		return ret;
	return ret;
}

static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: the guest page type may differ from the shadow page type,
	 * once PSE pages are supported in the future.
811 */ 812 ret = init_shadow_page(vgpu, &spt->shadow_page, type, true); 813 if (ret) { 814 gvt_vgpu_err("fail to initialize shadow page for spt\n"); 815 goto err; 816 } 817 818 ret = init_guest_page(vgpu, &spt->guest_page, 819 gfn, ppgtt_write_protection_handler, NULL); 820 if (ret) { 821 gvt_vgpu_err("fail to initialize guest page for spt\n"); 822 goto err; 823 } 824 825 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); 826 return spt; 827 err: 828 ppgtt_free_shadow_page(spt); 829 return ERR_PTR(ret); 830 } 831 832 static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( 833 struct intel_vgpu *vgpu, unsigned long mfn) 834 { 835 struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn); 836 837 if (p) 838 return shadow_page_to_ppgtt_spt(p); 839 840 gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); 841 return NULL; 842 } 843 844 #define pt_entry_size_shift(spt) \ 845 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) 846 847 #define pt_entries(spt) \ 848 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) 849 850 #define for_each_present_guest_entry(spt, e, i) \ 851 for (i = 0; i < pt_entries(spt); i++) \ 852 if (!ppgtt_get_guest_entry(spt, e, i) && \ 853 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 854 855 #define for_each_present_shadow_entry(spt, e, i) \ 856 for (i = 0; i < pt_entries(spt); i++) \ 857 if (!ppgtt_get_shadow_entry(spt, e, i) && \ 858 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 859 860 static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 861 { 862 int v = atomic_read(&spt->refcount); 863 864 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); 865 866 atomic_inc(&spt->refcount); 867 } 868 869 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt); 870 871 static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, 872 struct intel_gvt_gtt_entry *e) 873 { 874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 875 struct intel_vgpu_ppgtt_spt *s; 876 intel_gvt_gtt_type_t cur_pt_type; 877 878 if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type)))) 879 return -EINVAL; 880 881 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY 882 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 883 cur_pt_type = get_next_pt_type(e->type) + 1; 884 if (ops->get_pfn(e) == 885 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) 886 return 0; 887 } 888 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 889 if (!s) { 890 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", 891 ops->get_pfn(e)); 892 return -ENXIO; 893 } 894 return ppgtt_invalidate_shadow_page(s); 895 } 896 897 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 898 { 899 struct intel_vgpu *vgpu = spt->vgpu; 900 struct intel_gvt_gtt_entry e; 901 unsigned long index; 902 int ret; 903 int v = atomic_read(&spt->refcount); 904 905 trace_spt_change(spt->vgpu->id, "die", spt, 906 spt->guest_page.track.gfn, spt->shadow_page.type); 907 908 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); 909 910 if (atomic_dec_return(&spt->refcount) > 0) 911 return 0; 912 913 if (gtt_type_is_pte_pt(spt->shadow_page.type)) 914 goto release; 915 916 for_each_present_shadow_entry(spt, &e, index) { 917 if (!gtt_type_is_pt(get_next_pt_type(e.type))) { 918 gvt_vgpu_err("GVT doesn't support pse bit for now\n"); 919 return -EINVAL; 920 } 921 ret = ppgtt_invalidate_shadow_page_by_shadow_entry( 922 spt->vgpu, &e); 923 if (ret) 924 goto fail; 925 } 926 release: 927 trace_spt_change(spt->vgpu->id, "release", spt, 928 spt->guest_page.track.gfn, 
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	struct intel_vgpu_page_track *t;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
	if (t) {
		g = page_track_to_guest_page(t);
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
				&s->guest_page.track);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
				s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			ret = gtt_entry_p2m(vgpu, &ge, &se);
			if (ret)
				goto fail;
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}
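
/*
 * Note (illustrative summary): for a non-leaf guest entry, the population
 * path above first resolves (or allocates and write-protects) the shadow
 * page table for the child level, then ppgtt_generate_shadow_entry() copies
 * the guest entry and repoints its pfn at the shadow page's mfn, so the
 * hardware only ever walks shadow pages.
 */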

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
			index);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
			we->val64, index);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
			spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
			info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
				oos_page->guest_page->track.gfn << PAGE_SHIFT,
				vgpu);

		if (old.val64 == new.val64
				&& !test_and_clear_bit(index,
					spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu,
			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
				struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
				struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}
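
/*
 * Note (illustrative summary of the out-of-sync machinery): once a guest PTE
 * page has been written at least twice (see can_do_out_of_sync()), an oos
 * page holding a snapshot of the guest page is attached and write protection
 * is dropped, so further guest writes are no longer trapped. Before the next
 * workload is submitted, intel_vgpu_sync_oos_pages() re-enables protection
 * and sync_oos_page() diffs the snapshot against the guest page to bring the
 * shadow page table back up to date.
 */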
1266 */ 1267 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) 1268 { 1269 struct list_head *pos, *n; 1270 struct intel_vgpu_oos_page *oos_page; 1271 int ret; 1272 1273 if (!enable_out_of_sync) 1274 return 0; 1275 1276 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { 1277 oos_page = container_of(pos, 1278 struct intel_vgpu_oos_page, vm_list); 1279 ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page); 1280 if (ret) 1281 return ret; 1282 } 1283 return 0; 1284 } 1285 1286 /* 1287 * The heart of PPGTT shadow page table. 1288 */ 1289 static int ppgtt_handle_guest_write_page_table( 1290 struct intel_vgpu_guest_page *gpt, 1291 struct intel_gvt_gtt_entry *we, unsigned long index) 1292 { 1293 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); 1294 struct intel_vgpu *vgpu = spt->vgpu; 1295 int type = spt->shadow_page.type; 1296 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1297 struct intel_gvt_gtt_entry se; 1298 1299 int ret; 1300 int new_present; 1301 1302 new_present = ops->test_present(we); 1303 1304 /* 1305 * Adding the new entry first and then removing the old one, that can 1306 * guarantee the ppgtt table is validated during the window between 1307 * adding and removal. 1308 */ 1309 ppgtt_get_shadow_entry(spt, &se, index); 1310 1311 if (new_present) { 1312 ret = ppgtt_handle_guest_entry_add(gpt, we, index); 1313 if (ret) 1314 goto fail; 1315 } 1316 1317 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1318 if (ret) 1319 goto fail; 1320 1321 if (!new_present) { 1322 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); 1323 ppgtt_set_shadow_entry(spt, &se, index); 1324 } 1325 1326 return 0; 1327 fail: 1328 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", 1329 spt, we->val64, we->type); 1330 return ret; 1331 } 1332 1333 static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt) 1334 { 1335 return enable_out_of_sync 1336 && gtt_type_is_pte_pt( 1337 guest_page_to_ppgtt_spt(gpt)->guest_page_type) 1338 && gpt->write_cnt >= 2; 1339 } 1340 1341 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, 1342 unsigned long index) 1343 { 1344 set_bit(index, spt->post_shadow_bitmap); 1345 if (!list_empty(&spt->post_shadow_list)) 1346 return; 1347 1348 list_add_tail(&spt->post_shadow_list, 1349 &spt->vgpu->gtt.post_shadow_list_head); 1350 } 1351 1352 /** 1353 * intel_vgpu_flush_post_shadow - flush the post shadow transactions 1354 * @vgpu: a vGPU 1355 * 1356 * This function is called before submitting a guest workload to host, 1357 * to flush all the post shadows for a vGPU. 1358 * 1359 * Returns: 1360 * Zero on success, negative error code if failed. 
1361 */ 1362 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) 1363 { 1364 struct list_head *pos, *n; 1365 struct intel_vgpu_ppgtt_spt *spt; 1366 struct intel_gvt_gtt_entry ge; 1367 unsigned long index; 1368 int ret; 1369 1370 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) { 1371 spt = container_of(pos, struct intel_vgpu_ppgtt_spt, 1372 post_shadow_list); 1373 1374 for_each_set_bit(index, spt->post_shadow_bitmap, 1375 GTT_ENTRY_NUM_IN_ONE_PAGE) { 1376 ppgtt_get_guest_entry(spt, &ge, index); 1377 1378 ret = ppgtt_handle_guest_write_page_table( 1379 &spt->guest_page, &ge, index); 1380 if (ret) 1381 return ret; 1382 clear_bit(index, spt->post_shadow_bitmap); 1383 } 1384 list_del_init(&spt->post_shadow_list); 1385 } 1386 return 0; 1387 } 1388 1389 static int ppgtt_handle_guest_write_page_table_bytes( 1390 struct intel_vgpu_guest_page *gpt, 1391 u64 pa, void *p_data, int bytes) 1392 { 1393 struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); 1394 struct intel_vgpu *vgpu = spt->vgpu; 1395 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1396 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1397 struct intel_gvt_gtt_entry we, se; 1398 unsigned long index; 1399 int ret; 1400 1401 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; 1402 1403 ppgtt_get_guest_entry(spt, &we, index); 1404 1405 ops->test_pse(&we); 1406 1407 if (bytes == info->gtt_entry_size) { 1408 ret = ppgtt_handle_guest_write_page_table(gpt, &we, index); 1409 if (ret) 1410 return ret; 1411 } else { 1412 if (!test_bit(index, spt->post_shadow_bitmap)) { 1413 ppgtt_get_shadow_entry(spt, &se, index); 1414 ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); 1415 if (ret) 1416 return ret; 1417 } 1418 1419 ppgtt_set_post_shadow(spt, index); 1420 } 1421 1422 if (!enable_out_of_sync) 1423 return 0; 1424 1425 gpt->write_cnt++; 1426 1427 if (gpt->oos_page) 1428 ops->set_entry(gpt->oos_page->mem, &we, index, 1429 false, 0, vgpu); 1430 1431 if (can_do_out_of_sync(gpt)) { 1432 if (!gpt->oos_page) 1433 ppgtt_allocate_oos_page(vgpu, gpt); 1434 1435 ret = ppgtt_set_guest_page_oos(vgpu, gpt); 1436 if (ret < 0) 1437 return ret; 1438 } 1439 return 0; 1440 } 1441 1442 /* 1443 * mm page table allocation policy for bdw+ 1444 * - for ggtt, only virtual page table will be allocated. 1445 * - for ppgtt, dedicated virtual/shadow page table will be allocated. 1446 */ 1447 static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) 1448 { 1449 struct intel_vgpu *vgpu = mm->vgpu; 1450 struct intel_gvt *gvt = vgpu->gvt; 1451 const struct intel_gvt_device_info *info = &gvt->device_info; 1452 void *mem; 1453 1454 if (mm->type == INTEL_GVT_MM_PPGTT) { 1455 mm->page_table_entry_cnt = 4; 1456 mm->page_table_entry_size = mm->page_table_entry_cnt * 1457 info->gtt_entry_size; 1458 mem = kzalloc(mm->has_shadow_page_table ? 

static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mem;

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		mm->page_table_entry_cnt = 4;
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = kzalloc(mm->has_shadow_page_table ?
			mm->page_table_entry_size * 2
				: mm->page_table_entry_size, GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
		if (!mm->has_shadow_page_table)
			return 0;
		mm->shadow_page_table = mem + mm->page_table_entry_size;
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		mm->page_table_entry_cnt =
			(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = vzalloc(mm->page_table_entry_size);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
	}
	return 0;
}

static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
	if (mm->type == INTEL_GVT_MM_PPGTT) {
		kfree(mm->virtual_page_table);
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		if (mm->virtual_page_table)
			vfree(mm->virtual_page_table);
	}
	mm->virtual_page_table = mm->shadow_page_table = NULL;
}

static void invalidate_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int i;

	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
		return;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_shadow_root_entry(mm, &se, i);
		if (!ops->test_present(&se))
			continue;
		ppgtt_invalidate_shadow_page_by_shadow_entry(
				vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				NULL, se.type, se.val64, i);
	}
	mm->shadowed = false;
}

/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: a kref object embedded in the mm object
 *
 * This function is used to destroy a mm object for a vGPU
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;

	if (!mm->initialized)
		goto out;

	list_del(&mm->list);
	list_del(&mm->lru_list);

	if (mm->has_shadow_page_table)
		invalidate_mm(mm);

	gtt->mm_free_page_table(mm);
out:
	kfree(mm);
}

static int shadow_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int i;
	int ret;

	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
		return 0;

	mm->shadowed = true;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_guest_root_entry(mm, &ge, i);
		if (!ops->test_present(&ge))
			continue;

		trace_gpt_change(vgpu->id, __func__, NULL,
				ge.type, ge.val64, i);

		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "populate root pointer",
				NULL, se.type, se.val64, i);
	}
	return 0;
fail:
	invalidate_mm(mm);
	return ret;
}

/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if the user
 * wants to populate the shadow later.
 * @page_table_level: the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO.
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * Pointer to the mm object on success, error pointer encoding a negative
 * error code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
		int mm_type, void *virtual_page_table, int page_table_level,
		u32 pde_base_index)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		ret = -ENOMEM;
		goto fail;
	}

	mm->type = mm_type;

	if (page_table_level == 1)
		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
	else if (page_table_level == 3)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
	else if (page_table_level == 4)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	else {
		WARN_ON(1);
		ret = -EINVAL;
		goto fail;
	}

	mm->page_table_level = page_table_level;
	mm->pde_base_index = pde_base_index;

	mm->vgpu = vgpu;
	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);

	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);
	INIT_LIST_HEAD(&mm->list);
	INIT_LIST_HEAD(&mm->lru_list);
	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

	ret = gtt->mm_alloc_page_table(mm);
	if (ret) {
		gvt_vgpu_err("fail to allocate page table for mm\n");
		goto fail;
	}

	mm->initialized = true;

	if (virtual_page_table)
		memcpy(mm->virtual_page_table, virtual_page_table,
				mm->page_table_entry_size);

	if (mm->has_shadow_page_table) {
		ret = shadow_mm(mm);
		if (ret)
			goto fail;
		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
	}
	return mm;
fail:
	gvt_vgpu_err("fail to create mm\n");
	if (mm)
		intel_gvt_mm_unreference(mm);
	return ERR_PTR(ret);
}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer needs a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}
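
/*
 * Illustrative sketch (not part of this file): a workload submission path
 * would typically pin the PPGTT mm with intel_vgpu_pin_mm() below (shadowing
 * it on first use), run the workload, then unpin it so reclaim_one_mm() may
 * recycle it under memory pressure:
 *
 *	ret = intel_vgpu_pin_mm(mm);
 *	if (ret)
 *		return ret;
 *	... submit and wait for the workload ...
 *	intel_vgpu_unpin_mm(mm);
 */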
1693 */ 1694 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) 1695 { 1696 int ret; 1697 1698 if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT)) 1699 return 0; 1700 1701 if (!mm->shadowed) { 1702 ret = shadow_mm(mm); 1703 if (ret) 1704 return ret; 1705 } 1706 1707 atomic_inc(&mm->pincount); 1708 list_del_init(&mm->lru_list); 1709 list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head); 1710 return 0; 1711 } 1712 1713 static int reclaim_one_mm(struct intel_gvt *gvt) 1714 { 1715 struct intel_vgpu_mm *mm; 1716 struct list_head *pos, *n; 1717 1718 list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) { 1719 mm = container_of(pos, struct intel_vgpu_mm, lru_list); 1720 1721 if (mm->type != INTEL_GVT_MM_PPGTT) 1722 continue; 1723 if (atomic_read(&mm->pincount)) 1724 continue; 1725 1726 list_del_init(&mm->lru_list); 1727 invalidate_mm(mm); 1728 return 1; 1729 } 1730 return 0; 1731 } 1732 1733 /* 1734 * GMA translation APIs. 1735 */ 1736 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, 1737 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) 1738 { 1739 struct intel_vgpu *vgpu = mm->vgpu; 1740 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1741 struct intel_vgpu_ppgtt_spt *s; 1742 1743 if (WARN_ON(!mm->has_shadow_page_table)) 1744 return -EINVAL; 1745 1746 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 1747 if (!s) 1748 return -ENXIO; 1749 1750 if (!guest) 1751 ppgtt_get_shadow_entry(s, e, index); 1752 else 1753 ppgtt_get_guest_entry(s, e, index); 1754 return 0; 1755 } 1756 1757 /** 1758 * intel_vgpu_gma_to_gpa - translate a gma to GPA 1759 * @mm: mm object. could be a PPGTT or GGTT mm object 1760 * @gma: graphics memory address in this mm object 1761 * 1762 * This function is used to translate a graphics memory address in specific 1763 * graphics memory space to guest physical address. 1764 * 1765 * Returns: 1766 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed. 
1767 */ 1768 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) 1769 { 1770 struct intel_vgpu *vgpu = mm->vgpu; 1771 struct intel_gvt *gvt = vgpu->gvt; 1772 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; 1773 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; 1774 unsigned long gpa = INTEL_GVT_INVALID_ADDR; 1775 unsigned long gma_index[4]; 1776 struct intel_gvt_gtt_entry e; 1777 int i, index; 1778 int ret; 1779 1780 if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT) 1781 return INTEL_GVT_INVALID_ADDR; 1782 1783 if (mm->type == INTEL_GVT_MM_GGTT) { 1784 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1785 goto err; 1786 1787 ret = ggtt_get_guest_entry(mm, &e, 1788 gma_ops->gma_to_ggtt_pte_index(gma)); 1789 if (ret) 1790 goto err; 1791 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 1792 + (gma & ~I915_GTT_PAGE_MASK); 1793 1794 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); 1795 return gpa; 1796 } 1797 1798 switch (mm->page_table_level) { 1799 case 4: 1800 ret = ppgtt_get_shadow_root_entry(mm, &e, 0); 1801 if (ret) 1802 goto err; 1803 gma_index[0] = gma_ops->gma_to_pml4_index(gma); 1804 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); 1805 gma_index[2] = gma_ops->gma_to_pde_index(gma); 1806 gma_index[3] = gma_ops->gma_to_pte_index(gma); 1807 index = 4; 1808 break; 1809 case 3: 1810 ret = ppgtt_get_shadow_root_entry(mm, &e, 1811 gma_ops->gma_to_l3_pdp_index(gma)); 1812 if (ret) 1813 goto err; 1814 gma_index[0] = gma_ops->gma_to_pde_index(gma); 1815 gma_index[1] = gma_ops->gma_to_pte_index(gma); 1816 index = 2; 1817 break; 1818 case 2: 1819 ret = ppgtt_get_shadow_root_entry(mm, &e, 1820 gma_ops->gma_to_pde_index(gma)); 1821 if (ret) 1822 goto err; 1823 gma_index[0] = gma_ops->gma_to_pte_index(gma); 1824 index = 1; 1825 break; 1826 default: 1827 WARN_ON(1); 1828 goto err; 1829 } 1830 1831 /* walk into the shadow page table and get gpa from guest entry */ 1832 for (i = 0; i < index; i++) { 1833 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], 1834 (i == index - 1)); 1835 if (ret) 1836 goto err; 1837 1838 if (!pte_ops->test_present(&e)) { 1839 gvt_dbg_core("GMA 0x%lx is not present\n", gma); 1840 goto err; 1841 } 1842 } 1843 1844 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 1845 + (gma & ~I915_GTT_PAGE_MASK); 1846 1847 trace_gma_translate(vgpu->id, "ppgtt", 0, 1848 mm->page_table_level, gma, gpa); 1849 return gpa; 1850 err: 1851 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1852 return INTEL_GVT_INVALID_ADDR; 1853 } 1854 1855 static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu, 1856 unsigned int off, void *p_data, unsigned int bytes) 1857 { 1858 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1859 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1860 unsigned long index = off >> info->gtt_entry_size_shift; 1861 struct intel_gvt_gtt_entry e; 1862 1863 if (bytes != 4 && bytes != 8) 1864 return -EINVAL; 1865 1866 ggtt_get_guest_entry(ggtt_mm, &e, index); 1867 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), 1868 bytes); 1869 return 0; 1870 } 1871 1872 /** 1873 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read 1874 * @vgpu: a vGPU 1875 * @off: register offset 1876 * @p_data: data will be returned to guest 1877 * @bytes: data length 1878 * 1879 * This function is used to emulate the GTT MMIO register read 1880 * 1881 * Returns: 1882 * Zero on success, error code if failed. 
1883 */ 1884 int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, 1885 void *p_data, unsigned int bytes) 1886 { 1887 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1888 int ret; 1889 1890 if (bytes != 4 && bytes != 8) 1891 return -EINVAL; 1892 1893 off -= info->gtt_start_offset; 1894 ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes); 1895 return ret; 1896 } 1897 1898 static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1899 void *p_data, unsigned int bytes) 1900 { 1901 struct intel_gvt *gvt = vgpu->gvt; 1902 const struct intel_gvt_device_info *info = &gvt->device_info; 1903 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1904 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1905 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 1906 unsigned long gma; 1907 struct intel_gvt_gtt_entry e, m; 1908 int ret; 1909 1910 if (bytes != 4 && bytes != 8) 1911 return -EINVAL; 1912 1913 gma = g_gtt_index << I915_GTT_PAGE_SHIFT; 1914 1915 /* the VM may configure the whole GM space when ballooning is used */ 1916 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1917 return 0; 1918 1919 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 1920 1921 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1922 bytes); 1923 1924 if (ops->test_present(&e)) { 1925 ret = gtt_entry_p2m(vgpu, &e, &m); 1926 if (ret) { 1927 gvt_vgpu_err("fail to translate guest gtt entry\n"); 1928 /* guest driver may read/write the entry when partial 1929 * update the entry in this situation p2m will fail 1930 * settting the shadow entry to point to a scratch page 1931 */ 1932 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1933 } 1934 } else { 1935 m = e; 1936 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1937 } 1938 1939 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1940 gtt_invalidate(gvt->dev_priv); 1941 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 1942 return 0; 1943 } 1944 1945 /* 1946 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write 1947 * @vgpu: a vGPU 1948 * @off: register offset 1949 * @p_data: data from guest write 1950 * @bytes: data length 1951 * 1952 * This function is used to emulate the GTT MMIO register write 1953 * 1954 * Returns: 1955 * Zero on success, error code if failed. 
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by fully filling the scratch pt with entries that
	 * point to the next level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by the 'type' pt.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
	 * is of type GTT_TYPE_PPGTT_PTE_PT and is fully filled with the
	 * scratch page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same as i915's scratch page tree.
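		 *
		 * Once create_scratch_page_tree() has run for every level,
		 * the per-vGPU scratch hierarchy for a 4-level PPGTT looks
		 * roughly like (a descriptive sketch of the code above):
		 *
		 *   scratch_pt[PML4_PT] -> scratch_pt[PDP_PT]
		 *	-> scratch_pt[PDE_PT] -> scratch_pt[PTE_PT]
		 *
		 * so a guest entry that is not present (or fails to shadow)
		 * at any level can be pointed at the next-level scratch
		 * table; the PTE-level scratch table itself is left zeroed.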
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_vgpu_mm *ggtt_mm;

	hash_init(gtt->tracked_guest_page_hash_table);
	hash_init(gtt->shadow_page_hash_table);

	INIT_LIST_HEAD(&gtt->mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	intel_vgpu_reset_ggtt(vgpu);

	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
			NULL, 1, 0);
	if (IS_ERR(ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(ggtt_mm);
	}

	gtt->ggtt_mm = ggtt_mm;

	return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type == type) {
			vgpu->gvt->gtt.mm_free_page_table(mm);
			list_del(&mm->list);
			list_del(&mm->lru_list);
			kfree(mm);
		}
	}
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
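 *
 * A sketch of the expected pairing with intel_vgpu_init_gtt() (the calling
 * code lives in the vGPU create/destroy paths and is only summarized here
 * as an assumption):
 *
 *	ret = intel_vgpu_init_gtt(vgpu);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_vgpu_clean_gtt(vgpu);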
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	ppgtt_free_all_shadow_page(vgpu);
	release_scratch_page_tree(vgpu);

	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry)
{
	struct list_head *pos;
	struct intel_vgpu_mm *mm;
	u64 *src, *dst;

	list_for_each(pos, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;

		if (mm->page_table_level != page_table_level)
			continue;

		src = root_entry;
		dst = mm->virtual_page_table;

		if (page_table_level == 3) {
			if (src[0] == dst[0]
					&& src[1] == dst[1]
					&& src[2] == dst[2]
					&& src[3] == dst[3])
				return mm;
		} else {
			if (src[0] == dst[0])
				return mm;
		}
	}
	return NULL;
}

/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
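 *
 * The notification is raised by the guest driver, which first writes its
 * PPGTT root pointers into the PVINFO page and then writes the notification
 * code (a guest-side sketch, assuming the vgt_if layout from
 * i915_pvinfo.h; not code from this file):
 *
 *	I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(root));
 *	I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(root));
 *	I915_WRITE(vgtif_reg(g2v_notify),
 *		   VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE);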
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm\n");
			return PTR_ERR(mm);
		}
	}
	return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_gvt_mm_unreference(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
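 *
 * A sketch of the expected device-level pairing with intel_gvt_init_gtt()
 * (the calling code lives in the GVT device init/clean paths and is only
 * summarized here as an assumption):
 *
 *	ret = intel_gvt_init_gtt(gvt);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_gvt_clean_gtt(gvt);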
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	u32 index;
	u32 offset;
	u32 num_entries;
	struct intel_gvt_gtt_entry e;

	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
	e.type = GTT_TYPE_GGTT_PTE;
	ops->set_pfn(&e, gvt->gtt.scratch_mfn);
	e.val64 |= _PAGE_PRESENT;

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	gtt_invalidate(dev_priv);
}

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and the scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	ppgtt_free_all_shadow_page(vgpu);

	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);

	intel_vgpu_reset_ggtt(vgpu);
}
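
/*
 * A rough picture of the full-reset flow described above (the caller named
 * here is an assumption; the actual reset entry points live outside this
 * file):
 *
 *	intel_gvt_reset_vgpu_locked(vgpu, ...)
 *	    -> intel_vgpu_reset_gtt(vgpu)
 *	        -> ppgtt_free_all_shadow_page(vgpu)
 *	        -> intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT)
 *	        -> intel_vgpu_reset_ggtt(vgpu)
 *	           (every GGTT entry now points at gvt->gtt.scratch_mfn)
 */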