1 /* 2 * GTT virtualization 3 * 4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 * SOFTWARE. 24 * 25 * Authors: 26 * Zhi Wang <zhi.a.wang@intel.com> 27 * Zhenyu Wang <zhenyuw@linux.intel.com> 28 * Xiao Zheng <xiao.zheng@intel.com> 29 * 30 * Contributors: 31 * Min He <min.he@intel.com> 32 * Bing Niu <bing.niu@intel.com> 33 * 34 */ 35 36 #include "i915_drv.h" 37 #include "gvt.h" 38 #include "i915_pvinfo.h" 39 #include "trace.h" 40 41 #if defined(VERBOSE_DEBUG) 42 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args) 43 #else 44 #define gvt_vdbg_mm(fmt, args...) 45 #endif 46 47 static bool enable_out_of_sync = false; 48 static int preallocated_oos_pages = 8192; 49 50 /* 51 * validate a gm address and related range size, 52 * translate it to host gm address 53 */ 54 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) 55 { 56 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 57 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 58 gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", 59 addr, size); 60 return false; 61 } 62 return true; 63 } 64 65 /* translate a guest gmadr to host gmadr */ 66 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) 67 { 68 if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr), 69 "invalid guest gmadr %llx\n", g_addr)) 70 return -EACCES; 71 72 if (vgpu_gmadr_is_aperture(vgpu, g_addr)) 73 *h_addr = vgpu_aperture_gmadr_base(vgpu) 74 + (g_addr - vgpu_aperture_offset(vgpu)); 75 else 76 *h_addr = vgpu_hidden_gmadr_base(vgpu) 77 + (g_addr - vgpu_hidden_offset(vgpu)); 78 return 0; 79 } 80 81 /* translate a host gmadr to guest gmadr */ 82 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr) 83 { 84 if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr), 85 "invalid host gmadr %llx\n", h_addr)) 86 return -EACCES; 87 88 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) 89 *g_addr = vgpu_aperture_gmadr_base(vgpu) 90 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt)); 91 else 92 *g_addr = vgpu_hidden_gmadr_base(vgpu) 93 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt)); 94 return 0; 95 } 96 97 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, 98 unsigned long *h_index) 99 { 100 u64 h_addr; 101 int ret; 102 103 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT, 104 &h_addr); 105 if (ret) 106 return ret; 107 108 *h_index = h_addr >> I915_GTT_PAGE_SHIFT; 109 return 0; 110 } 111 112 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 113 unsigned long *g_index) 114 { 115 u64 g_addr; 116 int ret; 117 118 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT, 119 &g_addr); 120 if (ret) 121 return ret; 122 123 *g_index = g_addr >> I915_GTT_PAGE_SHIFT; 124 return 0; 125 } 126 127 #define gtt_type_is_entry(type) \ 128 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \ 129 && type != GTT_TYPE_PPGTT_PTE_ENTRY \ 130 && type != GTT_TYPE_PPGTT_ROOT_ENTRY) 131 132 #define gtt_type_is_pt(type) \ 133 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) 134 135 #define gtt_type_is_pte_pt(type) \ 136 (type == GTT_TYPE_PPGTT_PTE_PT) 137 138 #define gtt_type_is_root_pointer(type) \ 139 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY) 140 141 #define gtt_init_entry(e, t, p, v) do { \ 142 (e)->type = t; \ 143 (e)->pdev = p; \ 144 memcpy(&(e)->val64, &v, sizeof(v)); \ 145 } while (0) 146 147 /* 148 * Mappings between GTT_TYPE* enumerations. 149 * Following information can be found according to the given type: 150 * - type of next level page table 151 * - type of entry inside this level page table 152 * - type of entry with PSE set 153 * 154 * If the given type doesn't have such a kind of information, 155 * e.g. give a l4 root entry type, then request to get its PSE type, 156 * give a PTE page table type, then request to get its next level page 157 * table type, as we know l4 root entry doesn't have a PSE bit, 158 * and a PTE page table doesn't have a next level page table type, 159 * GTT_TYPE_INVALID will be returned. This is useful when traversing a 160 * page table. 161 */ 162 163 struct gtt_type_table_entry { 164 int entry_type; 165 int pt_type; 166 int next_pt_type; 167 int pse_entry_type; 168 }; 169 170 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \ 171 [type] = { \ 172 .entry_type = e_type, \ 173 .pt_type = cpt_type, \ 174 .next_pt_type = npt_type, \ 175 .pse_entry_type = pse_type, \ 176 } 177 178 static struct gtt_type_table_entry gtt_type_table[] = { 179 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY, 180 GTT_TYPE_PPGTT_ROOT_L4_ENTRY, 181 GTT_TYPE_INVALID, 182 GTT_TYPE_PPGTT_PML4_PT, 183 GTT_TYPE_INVALID), 184 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT, 185 GTT_TYPE_PPGTT_PML4_ENTRY, 186 GTT_TYPE_PPGTT_PML4_PT, 187 GTT_TYPE_PPGTT_PDP_PT, 188 GTT_TYPE_INVALID), 189 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY, 190 GTT_TYPE_PPGTT_PML4_ENTRY, 191 GTT_TYPE_PPGTT_PML4_PT, 192 GTT_TYPE_PPGTT_PDP_PT, 193 GTT_TYPE_INVALID), 194 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT, 195 GTT_TYPE_PPGTT_PDP_ENTRY, 196 GTT_TYPE_PPGTT_PDP_PT, 197 GTT_TYPE_PPGTT_PDE_PT, 198 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 199 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY, 200 GTT_TYPE_PPGTT_ROOT_L3_ENTRY, 201 GTT_TYPE_INVALID, 202 GTT_TYPE_PPGTT_PDE_PT, 203 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 204 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY, 205 GTT_TYPE_PPGTT_PDP_ENTRY, 206 GTT_TYPE_PPGTT_PDP_PT, 207 GTT_TYPE_PPGTT_PDE_PT, 208 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 209 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT, 210 GTT_TYPE_PPGTT_PDE_ENTRY, 211 GTT_TYPE_PPGTT_PDE_PT, 212 GTT_TYPE_PPGTT_PTE_PT, 213 GTT_TYPE_PPGTT_PTE_2M_ENTRY), 214 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY, 215 GTT_TYPE_PPGTT_PDE_ENTRY, 216 GTT_TYPE_PPGTT_PDE_PT, 217 GTT_TYPE_PPGTT_PTE_PT, 218 GTT_TYPE_PPGTT_PTE_2M_ENTRY), 219 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT, 220 GTT_TYPE_PPGTT_PTE_4K_ENTRY, 221 GTT_TYPE_PPGTT_PTE_PT, 222 GTT_TYPE_INVALID, 223 GTT_TYPE_INVALID), 224 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY, 225 GTT_TYPE_PPGTT_PTE_4K_ENTRY, 226 GTT_TYPE_PPGTT_PTE_PT, 227 GTT_TYPE_INVALID, 228 GTT_TYPE_INVALID), 229 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY, 230 GTT_TYPE_PPGTT_PDE_ENTRY, 231 GTT_TYPE_PPGTT_PDE_PT, 232 GTT_TYPE_INVALID, 233 GTT_TYPE_PPGTT_PTE_2M_ENTRY), 234 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY, 235 GTT_TYPE_PPGTT_PDP_ENTRY, 236 GTT_TYPE_PPGTT_PDP_PT, 237 GTT_TYPE_INVALID, 238 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 239 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE, 240 GTT_TYPE_GGTT_PTE, 241 GTT_TYPE_INVALID, 242 GTT_TYPE_INVALID, 243 GTT_TYPE_INVALID), 244 }; 245 246 static inline int get_next_pt_type(int type) 247 { 248 return gtt_type_table[type].next_pt_type; 249 } 250 251 static inline int get_pt_type(int type) 252 { 253 return gtt_type_table[type].pt_type; 254 } 255 256 static inline int get_entry_type(int type) 257 { 258 return gtt_type_table[type].entry_type; 259 } 260 261 static inline int get_pse_type(int type) 262 { 263 return gtt_type_table[type].pse_entry_type; 264 } 265 266 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 267 { 268 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 269 270 return readq(addr); 271 } 272 273 static void ggtt_invalidate(struct drm_i915_private *dev_priv) 274 { 275 mmio_hw_access_pre(dev_priv); 276 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 277 mmio_hw_access_post(dev_priv); 278 } 279 280 static void write_pte64(struct drm_i915_private *dev_priv, 281 unsigned long index, u64 pte) 282 { 283 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 284 285 writeq(pte, addr); 286 } 287 288 static inline int gtt_get_entry64(void *pt, 289 struct intel_gvt_gtt_entry *e, 290 unsigned long index, bool hypervisor_access, unsigned long gpa, 291 struct intel_vgpu *vgpu) 292 { 293 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 294 int ret; 295 296 if (WARN_ON(info->gtt_entry_size != 8)) 297 return -EINVAL; 298 299 if (hypervisor_access) { 300 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + 301 (index << info->gtt_entry_size_shift), 302 &e->val64, 8); 303 if (WARN_ON(ret)) 304 return ret; 305 } else if (!pt) { 306 e->val64 = read_pte64(vgpu->gvt->dev_priv, index); 307 } else { 308 e->val64 = *((u64 *)pt + index); 309 } 310 return 0; 311 } 312 313 static inline int gtt_set_entry64(void *pt, 314 struct intel_gvt_gtt_entry *e, 315 unsigned long index, bool hypervisor_access, unsigned long gpa, 316 struct intel_vgpu *vgpu) 317 { 318 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 319 int ret; 320 321 if (WARN_ON(info->gtt_entry_size != 8)) 322 return -EINVAL; 323 324 if (hypervisor_access) { 325 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa + 326 (index << info->gtt_entry_size_shift), 327 &e->val64, 8); 328 if (WARN_ON(ret)) 329 return ret; 330 } else if (!pt) { 331 write_pte64(vgpu->gvt->dev_priv, index, e->val64); 332 } else { 333 *((u64 *)pt + index) = e->val64; 334 } 335 return 0; 336 } 337 338 #define GTT_HAW 46 339 340 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30) 341 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21) 342 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12) 343 344 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) 345 { 346 unsigned long pfn; 347 348 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) 349 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; 350 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) 351 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; 352 else 353 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; 354 return pfn; 355 } 356 357 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) 358 { 359 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { 360 e->val64 &= ~ADDR_1G_MASK; 361 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT); 362 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { 363 e->val64 &= ~ADDR_2M_MASK; 364 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT); 365 } else { 366 e->val64 &= ~ADDR_4K_MASK; 367 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT); 368 } 369 370 e->val64 |= (pfn << PAGE_SHIFT); 371 } 372 373 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) 374 { 375 /* Entry doesn't have PSE bit. */ 376 if (get_pse_type(e->type) == GTT_TYPE_INVALID) 377 return false; 378 379 e->type = get_entry_type(e->type); 380 if (!(e->val64 & _PAGE_PSE)) 381 return false; 382 383 e->type = get_pse_type(e->type); 384 return true; 385 } 386 387 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) 388 { 389 /* 390 * i915 writes PDP root pointer registers without present bit, 391 * it also works, so we need to treat root pointer entry 392 * specifically. 393 */ 394 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY 395 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) 396 return (e->val64 != 0); 397 else 398 return (e->val64 & _PAGE_PRESENT); 399 } 400 401 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) 402 { 403 e->val64 &= ~_PAGE_PRESENT; 404 } 405 406 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) 407 { 408 e->val64 |= _PAGE_PRESENT; 409 } 410 411 /* 412 * Per-platform GMA routines. 413 */ 414 static unsigned long gma_to_ggtt_pte_index(unsigned long gma) 415 { 416 unsigned long x = (gma >> I915_GTT_PAGE_SHIFT); 417 418 trace_gma_index(__func__, gma, x); 419 return x; 420 } 421 422 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \ 423 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \ 424 { \ 425 unsigned long x = (exp); \ 426 trace_gma_index(__func__, gma, x); \ 427 return x; \ 428 } 429 430 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff)); 431 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff)); 432 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3)); 433 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); 434 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); 435 436 static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { 437 .get_entry = gtt_get_entry64, 438 .set_entry = gtt_set_entry64, 439 .clear_present = gtt_entry_clear_present, 440 .set_present = gtt_entry_set_present, 441 .test_present = gen8_gtt_test_present, 442 .test_pse = gen8_gtt_test_pse, 443 .get_pfn = gen8_gtt_get_pfn, 444 .set_pfn = gen8_gtt_set_pfn, 445 }; 446 447 static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { 448 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index, 449 .gma_to_pte_index = gen8_gma_to_pte_index, 450 .gma_to_pde_index = gen8_gma_to_pde_index, 451 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index, 452 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index, 453 .gma_to_pml4_index = gen8_gma_to_pml4_index, 454 }; 455 456 /* 457 * MM helpers. 458 */ 459 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, 460 struct intel_gvt_gtt_entry *entry, unsigned long index, 461 bool guest) 462 { 463 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 464 465 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); 466 467 entry->type = mm->ppgtt_mm.root_entry_type; 468 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : 469 mm->ppgtt_mm.shadow_pdps, 470 entry, index, false, 0, mm->vgpu); 471 472 pte_ops->test_pse(entry); 473 } 474 475 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, 476 struct intel_gvt_gtt_entry *entry, unsigned long index) 477 { 478 _ppgtt_get_root_entry(mm, entry, index, true); 479 } 480 481 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm, 482 struct intel_gvt_gtt_entry *entry, unsigned long index) 483 { 484 _ppgtt_get_root_entry(mm, entry, index, false); 485 } 486 487 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm, 488 struct intel_gvt_gtt_entry *entry, unsigned long index, 489 bool guest) 490 { 491 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 492 493 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps : 494 mm->ppgtt_mm.shadow_pdps, 495 entry, index, false, 0, mm->vgpu); 496 } 497 498 static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm, 499 struct intel_gvt_gtt_entry *entry, unsigned long index) 500 { 501 _ppgtt_set_root_entry(mm, entry, index, true); 502 } 503 504 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm, 505 struct intel_gvt_gtt_entry *entry, unsigned long index) 506 { 507 _ppgtt_set_root_entry(mm, entry, index, false); 508 } 509 510 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, 511 struct intel_gvt_gtt_entry *entry, unsigned long index) 512 { 513 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 514 515 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 516 517 entry->type = GTT_TYPE_GGTT_PTE; 518 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index, 519 false, 0, mm->vgpu); 520 } 521 522 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, 523 struct intel_gvt_gtt_entry *entry, unsigned long index) 524 { 525 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 526 527 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 528 529 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index, 530 false, 0, mm->vgpu); 531 } 532 533 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, 534 struct intel_gvt_gtt_entry *entry, unsigned long index) 535 { 536 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 537 538 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 539 540 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); 541 } 542 543 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, 544 struct intel_gvt_gtt_entry *entry, unsigned long index) 545 { 546 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 547 548 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 549 550 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); 551 } 552 553 /* 554 * PPGTT shadow page table helpers. 555 */ 556 static inline int ppgtt_spt_get_entry( 557 struct intel_vgpu_ppgtt_spt *spt, 558 void *page_table, int type, 559 struct intel_gvt_gtt_entry *e, unsigned long index, 560 bool guest) 561 { 562 struct intel_gvt *gvt = spt->vgpu->gvt; 563 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 564 int ret; 565 566 e->type = get_entry_type(type); 567 568 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 569 return -EINVAL; 570 571 ret = ops->get_entry(page_table, e, index, guest, 572 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 573 spt->vgpu); 574 if (ret) 575 return ret; 576 577 ops->test_pse(e); 578 579 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", 580 type, e->type, index, e->val64); 581 return 0; 582 } 583 584 static inline int ppgtt_spt_set_entry( 585 struct intel_vgpu_ppgtt_spt *spt, 586 void *page_table, int type, 587 struct intel_gvt_gtt_entry *e, unsigned long index, 588 bool guest) 589 { 590 struct intel_gvt *gvt = spt->vgpu->gvt; 591 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 592 593 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 594 return -EINVAL; 595 596 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", 597 type, e->type, index, e->val64); 598 599 return ops->set_entry(page_table, e, index, guest, 600 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 601 spt->vgpu); 602 } 603 604 #define ppgtt_get_guest_entry(spt, e, index) \ 605 ppgtt_spt_get_entry(spt, NULL, \ 606 spt->guest_page.type, e, index, true) 607 608 #define ppgtt_set_guest_entry(spt, e, index) \ 609 ppgtt_spt_set_entry(spt, NULL, \ 610 spt->guest_page.type, e, index, true) 611 612 #define ppgtt_get_shadow_entry(spt, e, index) \ 613 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ 614 spt->shadow_page.type, e, index, false) 615 616 #define ppgtt_set_shadow_entry(spt, e, index) \ 617 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ 618 spt->shadow_page.type, e, index, false) 619 620 static void *alloc_spt(gfp_t gfp_mask) 621 { 622 struct intel_vgpu_ppgtt_spt *spt; 623 624 spt = kzalloc(sizeof(*spt), gfp_mask); 625 if (!spt) 626 return NULL; 627 628 spt->shadow_page.page = alloc_page(gfp_mask); 629 if (!spt->shadow_page.page) { 630 kfree(spt); 631 return NULL; 632 } 633 return spt; 634 } 635 636 static void free_spt(struct intel_vgpu_ppgtt_spt *spt) 637 { 638 __free_page(spt->shadow_page.page); 639 kfree(spt); 640 } 641 642 static int detach_oos_page(struct intel_vgpu *vgpu, 643 struct intel_vgpu_oos_page *oos_page); 644 645 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) 646 { 647 struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev; 648 649 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); 650 651 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, 652 PCI_DMA_BIDIRECTIONAL); 653 654 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); 655 656 if (spt->guest_page.oos_page) 657 detach_oos_page(spt->vgpu, spt->guest_page.oos_page); 658 659 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); 660 661 list_del_init(&spt->post_shadow_list); 662 free_spt(spt); 663 } 664 665 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) 666 { 667 struct intel_vgpu_ppgtt_spt *spt; 668 struct radix_tree_iter iter; 669 void **slot; 670 671 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { 672 spt = radix_tree_deref_slot(slot); 673 ppgtt_free_spt(spt); 674 } 675 } 676 677 static int ppgtt_handle_guest_write_page_table_bytes( 678 struct intel_vgpu_ppgtt_spt *spt, 679 u64 pa, void *p_data, int bytes); 680 681 static int ppgtt_write_protection_handler( 682 struct intel_vgpu_page_track *page_track, 683 u64 gpa, void *data, int bytes) 684 { 685 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data; 686 687 int ret; 688 689 if (bytes != 4 && bytes != 8) 690 return -EINVAL; 691 692 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes); 693 if (ret) 694 return ret; 695 return ret; 696 } 697 698 /* Find a spt by guest gfn. */ 699 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn( 700 struct intel_vgpu *vgpu, unsigned long gfn) 701 { 702 struct intel_vgpu_page_track *track; 703 704 track = intel_vgpu_find_page_track(vgpu, gfn); 705 if (track && track->handler == ppgtt_write_protection_handler) 706 return track->priv_data; 707 708 return NULL; 709 } 710 711 /* Find the spt by shadow page mfn. */ 712 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( 713 struct intel_vgpu *vgpu, unsigned long mfn) 714 { 715 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn); 716 } 717 718 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); 719 720 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( 721 struct intel_vgpu *vgpu, int type, unsigned long gfn) 722 { 723 struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 724 struct intel_vgpu_ppgtt_spt *spt = NULL; 725 dma_addr_t daddr; 726 int ret; 727 728 retry: 729 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO); 730 if (!spt) { 731 if (reclaim_one_ppgtt_mm(vgpu->gvt)) 732 goto retry; 733 734 gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); 735 return ERR_PTR(-ENOMEM); 736 } 737 738 spt->vgpu = vgpu; 739 atomic_set(&spt->refcount, 1); 740 INIT_LIST_HEAD(&spt->post_shadow_list); 741 742 /* 743 * Init shadow_page. 744 */ 745 spt->shadow_page.type = type; 746 daddr = dma_map_page(kdev, spt->shadow_page.page, 747 0, 4096, PCI_DMA_BIDIRECTIONAL); 748 if (dma_mapping_error(kdev, daddr)) { 749 gvt_vgpu_err("fail to map dma addr\n"); 750 ret = -EINVAL; 751 goto err_free_spt; 752 } 753 spt->shadow_page.vaddr = page_address(spt->shadow_page.page); 754 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; 755 756 /* 757 * Init guest_page. 758 */ 759 spt->guest_page.type = type; 760 spt->guest_page.gfn = gfn; 761 762 ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn, 763 ppgtt_write_protection_handler, spt); 764 if (ret) 765 goto err_unmap_dma; 766 767 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); 768 if (ret) 769 goto err_unreg_page_track; 770 771 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); 772 return spt; 773 774 err_unreg_page_track: 775 intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn); 776 err_unmap_dma: 777 dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 778 err_free_spt: 779 free_spt(spt); 780 return ERR_PTR(ret); 781 } 782 783 #define pt_entry_size_shift(spt) \ 784 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) 785 786 #define pt_entries(spt) \ 787 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) 788 789 #define for_each_present_guest_entry(spt, e, i) \ 790 for (i = 0; i < pt_entries(spt); i++) \ 791 if (!ppgtt_get_guest_entry(spt, e, i) && \ 792 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 793 794 #define for_each_present_shadow_entry(spt, e, i) \ 795 for (i = 0; i < pt_entries(spt); i++) \ 796 if (!ppgtt_get_shadow_entry(spt, e, i) && \ 797 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 798 799 static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) 800 { 801 int v = atomic_read(&spt->refcount); 802 803 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); 804 805 atomic_inc(&spt->refcount); 806 } 807 808 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); 809 810 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, 811 struct intel_gvt_gtt_entry *e) 812 { 813 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 814 struct intel_vgpu_ppgtt_spt *s; 815 intel_gvt_gtt_type_t cur_pt_type; 816 817 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type))); 818 819 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY 820 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 821 cur_pt_type = get_next_pt_type(e->type) + 1; 822 if (ops->get_pfn(e) == 823 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) 824 return 0; 825 } 826 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 827 if (!s) { 828 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", 829 ops->get_pfn(e)); 830 return -ENXIO; 831 } 832 return ppgtt_invalidate_spt(s); 833 } 834 835 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, 836 struct intel_gvt_gtt_entry *entry) 837 { 838 struct intel_vgpu *vgpu = spt->vgpu; 839 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 840 unsigned long pfn; 841 int type; 842 843 pfn = ops->get_pfn(entry); 844 type = spt->shadow_page.type; 845 846 if (pfn == vgpu->gtt.scratch_pt[type].page_mfn) 847 return; 848 849 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); 850 } 851 852 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) 853 { 854 struct intel_vgpu *vgpu = spt->vgpu; 855 struct intel_gvt_gtt_entry e; 856 unsigned long index; 857 int ret; 858 int v = atomic_read(&spt->refcount); 859 860 trace_spt_change(spt->vgpu->id, "die", spt, 861 spt->guest_page.gfn, spt->shadow_page.type); 862 863 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); 864 865 if (atomic_dec_return(&spt->refcount) > 0) 866 return 0; 867 868 for_each_present_shadow_entry(spt, &e, index) { 869 switch (e.type) { 870 case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 871 gvt_vdbg_mm("invalidate 4K entry\n"); 872 ppgtt_invalidate_pte(spt, &e); 873 break; 874 case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 875 case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 876 WARN(1, "GVT doesn't support 2M/1GB page\n"); 877 continue; 878 case GTT_TYPE_PPGTT_PML4_ENTRY: 879 case GTT_TYPE_PPGTT_PDP_ENTRY: 880 case GTT_TYPE_PPGTT_PDE_ENTRY: 881 gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n"); 882 ret = ppgtt_invalidate_spt_by_shadow_entry( 883 spt->vgpu, &e); 884 if (ret) 885 goto fail; 886 break; 887 default: 888 GEM_BUG_ON(1); 889 } 890 } 891 892 trace_spt_change(spt->vgpu->id, "release", spt, 893 spt->guest_page.gfn, spt->shadow_page.type); 894 ppgtt_free_spt(spt); 895 return 0; 896 fail: 897 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", 898 spt, e.val64, e.type); 899 return ret; 900 } 901 902 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); 903 904 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( 905 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) 906 { 907 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 908 struct intel_vgpu_ppgtt_spt *spt = NULL; 909 int ret; 910 911 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); 912 913 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); 914 if (spt) 915 ppgtt_get_spt(spt); 916 else { 917 int type = get_next_pt_type(we->type); 918 919 spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we)); 920 if (IS_ERR(spt)) { 921 ret = PTR_ERR(spt); 922 goto fail; 923 } 924 925 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); 926 if (ret) 927 goto fail; 928 929 ret = ppgtt_populate_spt(spt); 930 if (ret) 931 goto fail; 932 933 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, 934 spt->shadow_page.type); 935 } 936 return spt; 937 fail: 938 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 939 spt, we->val64, we->type); 940 return ERR_PTR(ret); 941 } 942 943 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, 944 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge) 945 { 946 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; 947 948 se->type = ge->type; 949 se->val64 = ge->val64; 950 951 ops->set_pfn(se, s->shadow_page.mfn); 952 } 953 954 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, 955 struct intel_vgpu_ppgtt_spt *spt, unsigned long index, 956 struct intel_gvt_gtt_entry *ge) 957 { 958 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 959 struct intel_gvt_gtt_entry se = *ge; 960 unsigned long gfn; 961 dma_addr_t dma_addr; 962 int ret; 963 964 if (!pte_ops->test_present(ge)) 965 return 0; 966 967 gfn = pte_ops->get_pfn(ge); 968 969 switch (ge->type) { 970 case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 971 gvt_vdbg_mm("shadow 4K gtt entry\n"); 972 break; 973 case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 974 case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 975 gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n"); 976 return -EINVAL; 977 default: 978 GEM_BUG_ON(1); 979 }; 980 981 /* direct shadow */ 982 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr); 983 if (ret) 984 return -ENXIO; 985 986 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT); 987 ppgtt_set_shadow_entry(spt, &se, index); 988 return 0; 989 } 990 991 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) 992 { 993 struct intel_vgpu *vgpu = spt->vgpu; 994 struct intel_gvt *gvt = vgpu->gvt; 995 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 996 struct intel_vgpu_ppgtt_spt *s; 997 struct intel_gvt_gtt_entry se, ge; 998 unsigned long gfn, i; 999 int ret; 1000 1001 trace_spt_change(spt->vgpu->id, "born", spt, 1002 spt->guest_page.gfn, spt->shadow_page.type); 1003 1004 for_each_present_guest_entry(spt, &ge, i) { 1005 if (gtt_type_is_pt(get_next_pt_type(ge.type))) { 1006 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); 1007 if (IS_ERR(s)) { 1008 ret = PTR_ERR(s); 1009 goto fail; 1010 } 1011 ppgtt_get_shadow_entry(spt, &se, i); 1012 ppgtt_generate_shadow_entry(&se, s, &ge); 1013 ppgtt_set_shadow_entry(spt, &se, i); 1014 } else { 1015 gfn = ops->get_pfn(&ge); 1016 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { 1017 ops->set_pfn(&se, gvt->gtt.scratch_mfn); 1018 ppgtt_set_shadow_entry(spt, &se, i); 1019 continue; 1020 } 1021 1022 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge); 1023 if (ret) 1024 goto fail; 1025 } 1026 } 1027 return 0; 1028 fail: 1029 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1030 spt, ge.val64, ge.type); 1031 return ret; 1032 } 1033 1034 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, 1035 struct intel_gvt_gtt_entry *se, unsigned long index) 1036 { 1037 struct intel_vgpu *vgpu = spt->vgpu; 1038 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1039 int ret; 1040 1041 trace_spt_guest_change(spt->vgpu->id, "remove", spt, 1042 spt->shadow_page.type, se->val64, index); 1043 1044 gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n", 1045 se->type, index, se->val64); 1046 1047 if (!ops->test_present(se)) 1048 return 0; 1049 1050 if (ops->get_pfn(se) == 1051 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn) 1052 return 0; 1053 1054 if (gtt_type_is_pt(get_next_pt_type(se->type))) { 1055 struct intel_vgpu_ppgtt_spt *s = 1056 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se)); 1057 if (!s) { 1058 gvt_vgpu_err("fail to find guest page\n"); 1059 ret = -ENXIO; 1060 goto fail; 1061 } 1062 ret = ppgtt_invalidate_spt(s); 1063 if (ret) 1064 goto fail; 1065 } else 1066 ppgtt_invalidate_pte(spt, se); 1067 1068 return 0; 1069 fail: 1070 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1071 spt, se->val64, se->type); 1072 return ret; 1073 } 1074 1075 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, 1076 struct intel_gvt_gtt_entry *we, unsigned long index) 1077 { 1078 struct intel_vgpu *vgpu = spt->vgpu; 1079 struct intel_gvt_gtt_entry m; 1080 struct intel_vgpu_ppgtt_spt *s; 1081 int ret; 1082 1083 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type, 1084 we->val64, index); 1085 1086 gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n", 1087 we->type, index, we->val64); 1088 1089 if (gtt_type_is_pt(get_next_pt_type(we->type))) { 1090 s = ppgtt_populate_spt_by_guest_entry(vgpu, we); 1091 if (IS_ERR(s)) { 1092 ret = PTR_ERR(s); 1093 goto fail; 1094 } 1095 ppgtt_get_shadow_entry(spt, &m, index); 1096 ppgtt_generate_shadow_entry(&m, s, we); 1097 ppgtt_set_shadow_entry(spt, &m, index); 1098 } else { 1099 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we); 1100 if (ret) 1101 goto fail; 1102 } 1103 return 0; 1104 fail: 1105 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", 1106 spt, we->val64, we->type); 1107 return ret; 1108 } 1109 1110 static int sync_oos_page(struct intel_vgpu *vgpu, 1111 struct intel_vgpu_oos_page *oos_page) 1112 { 1113 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1114 struct intel_gvt *gvt = vgpu->gvt; 1115 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1116 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; 1117 struct intel_gvt_gtt_entry old, new; 1118 int index; 1119 int ret; 1120 1121 trace_oos_change(vgpu->id, "sync", oos_page->id, 1122 spt, spt->guest_page.type); 1123 1124 old.type = new.type = get_entry_type(spt->guest_page.type); 1125 old.val64 = new.val64 = 0; 1126 1127 for (index = 0; index < (I915_GTT_PAGE_SIZE >> 1128 info->gtt_entry_size_shift); index++) { 1129 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); 1130 ops->get_entry(NULL, &new, index, true, 1131 spt->guest_page.gfn << PAGE_SHIFT, vgpu); 1132 1133 if (old.val64 == new.val64 1134 && !test_and_clear_bit(index, spt->post_shadow_bitmap)) 1135 continue; 1136 1137 trace_oos_sync(vgpu->id, oos_page->id, 1138 spt, spt->guest_page.type, 1139 new.val64, index); 1140 1141 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new); 1142 if (ret) 1143 return ret; 1144 1145 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); 1146 } 1147 1148 spt->guest_page.write_cnt = 0; 1149 list_del_init(&spt->post_shadow_list); 1150 return 0; 1151 } 1152 1153 static int detach_oos_page(struct intel_vgpu *vgpu, 1154 struct intel_vgpu_oos_page *oos_page) 1155 { 1156 struct intel_gvt *gvt = vgpu->gvt; 1157 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; 1158 1159 trace_oos_change(vgpu->id, "detach", oos_page->id, 1160 spt, spt->guest_page.type); 1161 1162 spt->guest_page.write_cnt = 0; 1163 spt->guest_page.oos_page = NULL; 1164 oos_page->spt = NULL; 1165 1166 list_del_init(&oos_page->vm_list); 1167 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); 1168 1169 return 0; 1170 } 1171 1172 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, 1173 struct intel_vgpu_ppgtt_spt *spt) 1174 { 1175 struct intel_gvt *gvt = spt->vgpu->gvt; 1176 int ret; 1177 1178 ret = intel_gvt_hypervisor_read_gpa(spt->vgpu, 1179 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 1180 oos_page->mem, I915_GTT_PAGE_SIZE); 1181 if (ret) 1182 return ret; 1183 1184 oos_page->spt = spt; 1185 spt->guest_page.oos_page = oos_page; 1186 1187 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); 1188 1189 trace_oos_change(spt->vgpu->id, "attach", oos_page->id, 1190 spt, spt->guest_page.type); 1191 return 0; 1192 } 1193 1194 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt) 1195 { 1196 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1197 int ret; 1198 1199 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn); 1200 if (ret) 1201 return ret; 1202 1203 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id, 1204 spt, spt->guest_page.type); 1205 1206 list_del_init(&oos_page->vm_list); 1207 return sync_oos_page(spt->vgpu, oos_page); 1208 } 1209 1210 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt) 1211 { 1212 struct intel_gvt *gvt = spt->vgpu->gvt; 1213 struct intel_gvt_gtt *gtt = &gvt->gtt; 1214 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1215 int ret; 1216 1217 WARN(oos_page, "shadow PPGTT page has already has a oos page\n"); 1218 1219 if (list_empty(>t->oos_page_free_list_head)) { 1220 oos_page = container_of(gtt->oos_page_use_list_head.next, 1221 struct intel_vgpu_oos_page, list); 1222 ret = ppgtt_set_guest_page_sync(oos_page->spt); 1223 if (ret) 1224 return ret; 1225 ret = detach_oos_page(spt->vgpu, oos_page); 1226 if (ret) 1227 return ret; 1228 } else 1229 oos_page = container_of(gtt->oos_page_free_list_head.next, 1230 struct intel_vgpu_oos_page, list); 1231 return attach_oos_page(oos_page, spt); 1232 } 1233 1234 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt) 1235 { 1236 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1237 1238 if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n")) 1239 return -EINVAL; 1240 1241 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id, 1242 spt, spt->guest_page.type); 1243 1244 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head); 1245 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn); 1246 } 1247 1248 /** 1249 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU 1250 * @vgpu: a vGPU 1251 * 1252 * This function is called before submitting a guest workload to host, 1253 * to sync all the out-of-synced shadow for vGPU 1254 * 1255 * Returns: 1256 * Zero on success, negative error code if failed. 1257 */ 1258 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) 1259 { 1260 struct list_head *pos, *n; 1261 struct intel_vgpu_oos_page *oos_page; 1262 int ret; 1263 1264 if (!enable_out_of_sync) 1265 return 0; 1266 1267 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { 1268 oos_page = container_of(pos, 1269 struct intel_vgpu_oos_page, vm_list); 1270 ret = ppgtt_set_guest_page_sync(oos_page->spt); 1271 if (ret) 1272 return ret; 1273 } 1274 return 0; 1275 } 1276 1277 /* 1278 * The heart of PPGTT shadow page table. 1279 */ 1280 static int ppgtt_handle_guest_write_page_table( 1281 struct intel_vgpu_ppgtt_spt *spt, 1282 struct intel_gvt_gtt_entry *we, unsigned long index) 1283 { 1284 struct intel_vgpu *vgpu = spt->vgpu; 1285 int type = spt->shadow_page.type; 1286 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1287 struct intel_gvt_gtt_entry old_se; 1288 int new_present; 1289 int ret; 1290 1291 new_present = ops->test_present(we); 1292 1293 /* 1294 * Adding the new entry first and then removing the old one, that can 1295 * guarantee the ppgtt table is validated during the window between 1296 * adding and removal. 1297 */ 1298 ppgtt_get_shadow_entry(spt, &old_se, index); 1299 1300 if (new_present) { 1301 ret = ppgtt_handle_guest_entry_add(spt, we, index); 1302 if (ret) 1303 goto fail; 1304 } 1305 1306 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index); 1307 if (ret) 1308 goto fail; 1309 1310 if (!new_present) { 1311 ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); 1312 ppgtt_set_shadow_entry(spt, &old_se, index); 1313 } 1314 1315 return 0; 1316 fail: 1317 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", 1318 spt, we->val64, we->type); 1319 return ret; 1320 } 1321 1322 1323 1324 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt) 1325 { 1326 return enable_out_of_sync 1327 && gtt_type_is_pte_pt(spt->guest_page.type) 1328 && spt->guest_page.write_cnt >= 2; 1329 } 1330 1331 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, 1332 unsigned long index) 1333 { 1334 set_bit(index, spt->post_shadow_bitmap); 1335 if (!list_empty(&spt->post_shadow_list)) 1336 return; 1337 1338 list_add_tail(&spt->post_shadow_list, 1339 &spt->vgpu->gtt.post_shadow_list_head); 1340 } 1341 1342 /** 1343 * intel_vgpu_flush_post_shadow - flush the post shadow transactions 1344 * @vgpu: a vGPU 1345 * 1346 * This function is called before submitting a guest workload to host, 1347 * to flush all the post shadows for a vGPU. 1348 * 1349 * Returns: 1350 * Zero on success, negative error code if failed. 1351 */ 1352 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) 1353 { 1354 struct list_head *pos, *n; 1355 struct intel_vgpu_ppgtt_spt *spt; 1356 struct intel_gvt_gtt_entry ge; 1357 unsigned long index; 1358 int ret; 1359 1360 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) { 1361 spt = container_of(pos, struct intel_vgpu_ppgtt_spt, 1362 post_shadow_list); 1363 1364 for_each_set_bit(index, spt->post_shadow_bitmap, 1365 GTT_ENTRY_NUM_IN_ONE_PAGE) { 1366 ppgtt_get_guest_entry(spt, &ge, index); 1367 1368 ret = ppgtt_handle_guest_write_page_table(spt, 1369 &ge, index); 1370 if (ret) 1371 return ret; 1372 clear_bit(index, spt->post_shadow_bitmap); 1373 } 1374 list_del_init(&spt->post_shadow_list); 1375 } 1376 return 0; 1377 } 1378 1379 static int ppgtt_handle_guest_write_page_table_bytes( 1380 struct intel_vgpu_ppgtt_spt *spt, 1381 u64 pa, void *p_data, int bytes) 1382 { 1383 struct intel_vgpu *vgpu = spt->vgpu; 1384 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1385 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1386 struct intel_gvt_gtt_entry we, se; 1387 unsigned long index; 1388 int ret; 1389 1390 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; 1391 1392 ppgtt_get_guest_entry(spt, &we, index); 1393 1394 ops->test_pse(&we); 1395 1396 if (bytes == info->gtt_entry_size) { 1397 ret = ppgtt_handle_guest_write_page_table(spt, &we, index); 1398 if (ret) 1399 return ret; 1400 } else { 1401 if (!test_bit(index, spt->post_shadow_bitmap)) { 1402 int type = spt->shadow_page.type; 1403 1404 ppgtt_get_shadow_entry(spt, &se, index); 1405 ret = ppgtt_handle_guest_entry_removal(spt, &se, index); 1406 if (ret) 1407 return ret; 1408 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); 1409 ppgtt_set_shadow_entry(spt, &se, index); 1410 } 1411 ppgtt_set_post_shadow(spt, index); 1412 } 1413 1414 if (!enable_out_of_sync) 1415 return 0; 1416 1417 spt->guest_page.write_cnt++; 1418 1419 if (spt->guest_page.oos_page) 1420 ops->set_entry(spt->guest_page.oos_page->mem, &we, index, 1421 false, 0, vgpu); 1422 1423 if (can_do_out_of_sync(spt)) { 1424 if (!spt->guest_page.oos_page) 1425 ppgtt_allocate_oos_page(spt); 1426 1427 ret = ppgtt_set_guest_page_oos(spt); 1428 if (ret < 0) 1429 return ret; 1430 } 1431 return 0; 1432 } 1433 1434 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) 1435 { 1436 struct intel_vgpu *vgpu = mm->vgpu; 1437 struct intel_gvt *gvt = vgpu->gvt; 1438 struct intel_gvt_gtt *gtt = &gvt->gtt; 1439 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; 1440 struct intel_gvt_gtt_entry se; 1441 int index; 1442 1443 if (!mm->ppgtt_mm.shadowed) 1444 return; 1445 1446 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { 1447 ppgtt_get_shadow_root_entry(mm, &se, index); 1448 1449 if (!ops->test_present(&se)) 1450 continue; 1451 1452 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se); 1453 se.val64 = 0; 1454 ppgtt_set_shadow_root_entry(mm, &se, index); 1455 1456 trace_spt_guest_change(vgpu->id, "destroy root pointer", 1457 NULL, se.type, se.val64, index); 1458 } 1459 1460 mm->ppgtt_mm.shadowed = false; 1461 } 1462 1463 1464 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) 1465 { 1466 struct intel_vgpu *vgpu = mm->vgpu; 1467 struct intel_gvt *gvt = vgpu->gvt; 1468 struct intel_gvt_gtt *gtt = &gvt->gtt; 1469 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; 1470 struct intel_vgpu_ppgtt_spt *spt; 1471 struct intel_gvt_gtt_entry ge, se; 1472 int index, ret; 1473 1474 if (mm->ppgtt_mm.shadowed) 1475 return 0; 1476 1477 mm->ppgtt_mm.shadowed = true; 1478 1479 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { 1480 ppgtt_get_guest_root_entry(mm, &ge, index); 1481 1482 if (!ops->test_present(&ge)) 1483 continue; 1484 1485 trace_spt_guest_change(vgpu->id, __func__, NULL, 1486 ge.type, ge.val64, index); 1487 1488 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); 1489 if (IS_ERR(spt)) { 1490 gvt_vgpu_err("fail to populate guest root pointer\n"); 1491 ret = PTR_ERR(spt); 1492 goto fail; 1493 } 1494 ppgtt_generate_shadow_entry(&se, spt, &ge); 1495 ppgtt_set_shadow_root_entry(mm, &se, index); 1496 1497 trace_spt_guest_change(vgpu->id, "populate root pointer", 1498 NULL, se.type, se.val64, index); 1499 } 1500 1501 return 0; 1502 fail: 1503 invalidate_ppgtt_mm(mm); 1504 return ret; 1505 } 1506 1507 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu) 1508 { 1509 struct intel_vgpu_mm *mm; 1510 1511 mm = kzalloc(sizeof(*mm), GFP_KERNEL); 1512 if (!mm) 1513 return NULL; 1514 1515 mm->vgpu = vgpu; 1516 kref_init(&mm->ref); 1517 atomic_set(&mm->pincount, 0); 1518 1519 return mm; 1520 } 1521 1522 static void vgpu_free_mm(struct intel_vgpu_mm *mm) 1523 { 1524 kfree(mm); 1525 } 1526 1527 /** 1528 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU 1529 * @vgpu: a vGPU 1530 * @root_entry_type: ppgtt root entry type 1531 * @pdps: guest pdps. 1532 * 1533 * This function is used to create a ppgtt mm object for a vGPU. 1534 * 1535 * Returns: 1536 * Zero on success, negative error code in pointer if failed. 1537 */ 1538 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, 1539 intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) 1540 { 1541 struct intel_gvt *gvt = vgpu->gvt; 1542 struct intel_vgpu_mm *mm; 1543 int ret; 1544 1545 mm = vgpu_alloc_mm(vgpu); 1546 if (!mm) 1547 return ERR_PTR(-ENOMEM); 1548 1549 mm->type = INTEL_GVT_MM_PPGTT; 1550 1551 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && 1552 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY); 1553 mm->ppgtt_mm.root_entry_type = root_entry_type; 1554 1555 INIT_LIST_HEAD(&mm->ppgtt_mm.list); 1556 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list); 1557 1558 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) 1559 mm->ppgtt_mm.guest_pdps[0] = pdps[0]; 1560 else 1561 memcpy(mm->ppgtt_mm.guest_pdps, pdps, 1562 sizeof(mm->ppgtt_mm.guest_pdps)); 1563 1564 ret = shadow_ppgtt_mm(mm); 1565 if (ret) { 1566 gvt_vgpu_err("failed to shadow ppgtt mm\n"); 1567 vgpu_free_mm(mm); 1568 return ERR_PTR(ret); 1569 } 1570 1571 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); 1572 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); 1573 return mm; 1574 } 1575 1576 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) 1577 { 1578 struct intel_vgpu_mm *mm; 1579 unsigned long nr_entries; 1580 1581 mm = vgpu_alloc_mm(vgpu); 1582 if (!mm) 1583 return ERR_PTR(-ENOMEM); 1584 1585 mm->type = INTEL_GVT_MM_GGTT; 1586 1587 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; 1588 mm->ggtt_mm.virtual_ggtt = 1589 vzalloc(array_size(nr_entries, 1590 vgpu->gvt->device_info.gtt_entry_size)); 1591 if (!mm->ggtt_mm.virtual_ggtt) { 1592 vgpu_free_mm(mm); 1593 return ERR_PTR(-ENOMEM); 1594 } 1595 1596 return mm; 1597 } 1598 1599 /** 1600 * _intel_vgpu_mm_release - destroy a mm object 1601 * @mm_ref: a kref object 1602 * 1603 * This function is used to destroy a mm object for vGPU 1604 * 1605 */ 1606 void _intel_vgpu_mm_release(struct kref *mm_ref) 1607 { 1608 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref); 1609 1610 if (GEM_WARN_ON(atomic_read(&mm->pincount))) 1611 gvt_err("vgpu mm pin count bug detected\n"); 1612 1613 if (mm->type == INTEL_GVT_MM_PPGTT) { 1614 list_del(&mm->ppgtt_mm.list); 1615 list_del(&mm->ppgtt_mm.lru_list); 1616 invalidate_ppgtt_mm(mm); 1617 } else { 1618 vfree(mm->ggtt_mm.virtual_ggtt); 1619 } 1620 1621 vgpu_free_mm(mm); 1622 } 1623 1624 /** 1625 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object 1626 * @mm: a vGPU mm object 1627 * 1628 * This function is called when user doesn't want to use a vGPU mm object 1629 */ 1630 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) 1631 { 1632 atomic_dec(&mm->pincount); 1633 } 1634 1635 /** 1636 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object 1637 * @vgpu: a vGPU 1638 * 1639 * This function is called when user wants to use a vGPU mm object. If this 1640 * mm object hasn't been shadowed yet, the shadow will be populated at this 1641 * time. 1642 * 1643 * Returns: 1644 * Zero on success, negative error code if failed. 1645 */ 1646 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) 1647 { 1648 int ret; 1649 1650 atomic_inc(&mm->pincount); 1651 1652 if (mm->type == INTEL_GVT_MM_PPGTT) { 1653 ret = shadow_ppgtt_mm(mm); 1654 if (ret) 1655 return ret; 1656 1657 list_move_tail(&mm->ppgtt_mm.lru_list, 1658 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); 1659 1660 } 1661 1662 return 0; 1663 } 1664 1665 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) 1666 { 1667 struct intel_vgpu_mm *mm; 1668 struct list_head *pos, *n; 1669 1670 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { 1671 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); 1672 1673 if (atomic_read(&mm->pincount)) 1674 continue; 1675 1676 list_del_init(&mm->ppgtt_mm.lru_list); 1677 invalidate_ppgtt_mm(mm); 1678 return 1; 1679 } 1680 return 0; 1681 } 1682 1683 /* 1684 * GMA translation APIs. 1685 */ 1686 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, 1687 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) 1688 { 1689 struct intel_vgpu *vgpu = mm->vgpu; 1690 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1691 struct intel_vgpu_ppgtt_spt *s; 1692 1693 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 1694 if (!s) 1695 return -ENXIO; 1696 1697 if (!guest) 1698 ppgtt_get_shadow_entry(s, e, index); 1699 else 1700 ppgtt_get_guest_entry(s, e, index); 1701 return 0; 1702 } 1703 1704 /** 1705 * intel_vgpu_gma_to_gpa - translate a gma to GPA 1706 * @mm: mm object. could be a PPGTT or GGTT mm object 1707 * @gma: graphics memory address in this mm object 1708 * 1709 * This function is used to translate a graphics memory address in specific 1710 * graphics memory space to guest physical address. 1711 * 1712 * Returns: 1713 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed. 1714 */ 1715 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) 1716 { 1717 struct intel_vgpu *vgpu = mm->vgpu; 1718 struct intel_gvt *gvt = vgpu->gvt; 1719 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; 1720 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; 1721 unsigned long gpa = INTEL_GVT_INVALID_ADDR; 1722 unsigned long gma_index[4]; 1723 struct intel_gvt_gtt_entry e; 1724 int i, levels = 0; 1725 int ret; 1726 1727 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT && 1728 mm->type != INTEL_GVT_MM_PPGTT); 1729 1730 if (mm->type == INTEL_GVT_MM_GGTT) { 1731 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1732 goto err; 1733 1734 ggtt_get_guest_entry(mm, &e, 1735 gma_ops->gma_to_ggtt_pte_index(gma)); 1736 1737 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 1738 + (gma & ~I915_GTT_PAGE_MASK); 1739 1740 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); 1741 } else { 1742 switch (mm->ppgtt_mm.root_entry_type) { 1743 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 1744 ppgtt_get_shadow_root_entry(mm, &e, 0); 1745 1746 gma_index[0] = gma_ops->gma_to_pml4_index(gma); 1747 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); 1748 gma_index[2] = gma_ops->gma_to_pde_index(gma); 1749 gma_index[3] = gma_ops->gma_to_pte_index(gma); 1750 levels = 4; 1751 break; 1752 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 1753 ppgtt_get_shadow_root_entry(mm, &e, 1754 gma_ops->gma_to_l3_pdp_index(gma)); 1755 1756 gma_index[0] = gma_ops->gma_to_pde_index(gma); 1757 gma_index[1] = gma_ops->gma_to_pte_index(gma); 1758 levels = 2; 1759 break; 1760 default: 1761 GEM_BUG_ON(1); 1762 } 1763 1764 /* walk the shadow page table and get gpa from guest entry */ 1765 for (i = 0; i < levels; i++) { 1766 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], 1767 (i == levels - 1)); 1768 if (ret) 1769 goto err; 1770 1771 if (!pte_ops->test_present(&e)) { 1772 gvt_dbg_core("GMA 0x%lx is not present\n", gma); 1773 goto err; 1774 } 1775 } 1776 1777 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + 1778 (gma & ~I915_GTT_PAGE_MASK); 1779 trace_gma_translate(vgpu->id, "ppgtt", 0, 1780 mm->ppgtt_mm.root_entry_type, gma, gpa); 1781 } 1782 1783 return gpa; 1784 err: 1785 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1786 return INTEL_GVT_INVALID_ADDR; 1787 } 1788 1789 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, 1790 unsigned int off, void *p_data, unsigned int bytes) 1791 { 1792 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1793 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1794 unsigned long index = off >> info->gtt_entry_size_shift; 1795 struct intel_gvt_gtt_entry e; 1796 1797 if (bytes != 4 && bytes != 8) 1798 return -EINVAL; 1799 1800 ggtt_get_guest_entry(ggtt_mm, &e, index); 1801 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), 1802 bytes); 1803 return 0; 1804 } 1805 1806 /** 1807 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read 1808 * @vgpu: a vGPU 1809 * @off: register offset 1810 * @p_data: data will be returned to guest 1811 * @bytes: data length 1812 * 1813 * This function is used to emulate the GTT MMIO register read 1814 * 1815 * Returns: 1816 * Zero on success, error code if failed. 1817 */ 1818 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, 1819 void *p_data, unsigned int bytes) 1820 { 1821 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1822 int ret; 1823 1824 if (bytes != 4 && bytes != 8) 1825 return -EINVAL; 1826 1827 off -= info->gtt_start_offset; 1828 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes); 1829 return ret; 1830 } 1831 1832 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, 1833 struct intel_gvt_gtt_entry *entry) 1834 { 1835 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 1836 unsigned long pfn; 1837 1838 pfn = pte_ops->get_pfn(entry); 1839 if (pfn != vgpu->gvt->gtt.scratch_mfn) 1840 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, 1841 pfn << PAGE_SHIFT); 1842 } 1843 1844 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1845 void *p_data, unsigned int bytes) 1846 { 1847 struct intel_gvt *gvt = vgpu->gvt; 1848 const struct intel_gvt_device_info *info = &gvt->device_info; 1849 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1850 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1851 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 1852 unsigned long gma, gfn; 1853 struct intel_gvt_gtt_entry e, m; 1854 dma_addr_t dma_addr; 1855 int ret; 1856 1857 if (bytes != 4 && bytes != 8) 1858 return -EINVAL; 1859 1860 gma = g_gtt_index << I915_GTT_PAGE_SHIFT; 1861 1862 /* the VM may configure the whole GM space when ballooning is used */ 1863 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1864 return 0; 1865 1866 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 1867 1868 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1869 bytes); 1870 1871 if (ops->test_present(&e)) { 1872 gfn = ops->get_pfn(&e); 1873 m = e; 1874 1875 /* one PTE update may be issued in multiple writes and the 1876 * first write may not construct a valid gfn 1877 */ 1878 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { 1879 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1880 goto out; 1881 } 1882 1883 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, 1884 &dma_addr); 1885 if (ret) { 1886 gvt_vgpu_err("fail to populate guest ggtt entry\n"); 1887 /* guest driver may read/write the entry when partial 1888 * update the entry in this situation p2m will fail 1889 * settting the shadow entry to point to a scratch page 1890 */ 1891 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1892 } else 1893 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 1894 } else { 1895 ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); 1896 ggtt_invalidate_pte(vgpu, &m); 1897 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1898 ops->clear_present(&m); 1899 } 1900 1901 out: 1902 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 1903 ggtt_invalidate(gvt->dev_priv); 1904 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 1905 return 0; 1906 } 1907 1908 /* 1909 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write 1910 * @vgpu: a vGPU 1911 * @off: register offset 1912 * @p_data: data from guest write 1913 * @bytes: data length 1914 * 1915 * This function is used to emulate the GTT MMIO register write 1916 * 1917 * Returns: 1918 * Zero on success, error code if failed. 1919 */ 1920 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, 1921 unsigned int off, void *p_data, unsigned int bytes) 1922 { 1923 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1924 int ret; 1925 1926 if (bytes != 4 && bytes != 8) 1927 return -EINVAL; 1928 1929 off -= info->gtt_start_offset; 1930 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes); 1931 return ret; 1932 } 1933 1934 static int alloc_scratch_pages(struct intel_vgpu *vgpu, 1935 intel_gvt_gtt_type_t type) 1936 { 1937 struct intel_vgpu_gtt *gtt = &vgpu->gtt; 1938 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1939 int page_entry_num = I915_GTT_PAGE_SIZE >> 1940 vgpu->gvt->device_info.gtt_entry_size_shift; 1941 void *scratch_pt; 1942 int i; 1943 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 1944 dma_addr_t daddr; 1945 1946 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1947 return -EINVAL; 1948 1949 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1950 if (!scratch_pt) { 1951 gvt_vgpu_err("fail to allocate scratch page\n"); 1952 return -ENOMEM; 1953 } 1954 1955 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 1956 4096, PCI_DMA_BIDIRECTIONAL); 1957 if (dma_mapping_error(dev, daddr)) { 1958 gvt_vgpu_err("fail to dmamap scratch_pt\n"); 1959 __free_page(virt_to_page(scratch_pt)); 1960 return -ENOMEM; 1961 } 1962 gtt->scratch_pt[type].page_mfn = 1963 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); 1964 gtt->scratch_pt[type].page = virt_to_page(scratch_pt); 1965 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1966 vgpu->id, type, gtt->scratch_pt[type].page_mfn); 1967 1968 /* Build the tree by full filled the scratch pt with the entries which 1969 * point to the next level scratch pt or scratch page. The 1970 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1971 * 'type' pt. 1972 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1973 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self 1974 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 1975 */ 1976 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { 1977 struct intel_gvt_gtt_entry se; 1978 1979 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); 1980 se.type = get_entry_type(type - 1); 1981 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); 1982 1983 /* The entry parameters like present/writeable/cache type 1984 * set to the same as i915's scratch page tree. 1985 */ 1986 se.val64 |= _PAGE_PRESENT | _PAGE_RW; 1987 if (type == GTT_TYPE_PPGTT_PDE_PT) 1988 se.val64 |= PPAT_CACHED; 1989 1990 for (i = 0; i < page_entry_num; i++) 1991 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); 1992 } 1993 1994 return 0; 1995 } 1996 1997 static int release_scratch_page_tree(struct intel_vgpu *vgpu) 1998 { 1999 int i; 2000 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 2001 dma_addr_t daddr; 2002 2003 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2004 if (vgpu->gtt.scratch_pt[i].page != NULL) { 2005 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << 2006 I915_GTT_PAGE_SHIFT); 2007 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 2008 __free_page(vgpu->gtt.scratch_pt[i].page); 2009 vgpu->gtt.scratch_pt[i].page = NULL; 2010 vgpu->gtt.scratch_pt[i].page_mfn = 0; 2011 } 2012 } 2013 2014 return 0; 2015 } 2016 2017 static int create_scratch_page_tree(struct intel_vgpu *vgpu) 2018 { 2019 int i, ret; 2020 2021 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2022 ret = alloc_scratch_pages(vgpu, i); 2023 if (ret) 2024 goto err; 2025 } 2026 2027 return 0; 2028 2029 err: 2030 release_scratch_page_tree(vgpu); 2031 return ret; 2032 } 2033 2034 /** 2035 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virulization 2036 * @vgpu: a vGPU 2037 * 2038 * This function is used to initialize per-vGPU graphics memory virtualization 2039 * components. 2040 * 2041 * Returns: 2042 * Zero on success, error code if failed. 2043 */ 2044 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) 2045 { 2046 struct intel_vgpu_gtt *gtt = &vgpu->gtt; 2047 2048 INIT_RADIX_TREE(>t->spt_tree, GFP_KERNEL); 2049 2050 INIT_LIST_HEAD(>t->ppgtt_mm_list_head); 2051 INIT_LIST_HEAD(>t->oos_page_list_head); 2052 INIT_LIST_HEAD(>t->post_shadow_list_head); 2053 2054 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu); 2055 if (IS_ERR(gtt->ggtt_mm)) { 2056 gvt_vgpu_err("fail to create mm for ggtt.\n"); 2057 return PTR_ERR(gtt->ggtt_mm); 2058 } 2059 2060 intel_vgpu_reset_ggtt(vgpu, false); 2061 2062 return create_scratch_page_tree(vgpu); 2063 } 2064 2065 static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) 2066 { 2067 struct list_head *pos, *n; 2068 struct intel_vgpu_mm *mm; 2069 2070 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2071 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2072 intel_vgpu_destroy_mm(mm); 2073 } 2074 2075 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head))) 2076 gvt_err("vgpu ppgtt mm is not fully destroyed\n"); 2077 2078 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) { 2079 gvt_err("Why we still has spt not freed?\n"); 2080 ppgtt_free_all_spt(vgpu); 2081 } 2082 } 2083 2084 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) 2085 { 2086 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); 2087 vgpu->gtt.ggtt_mm = NULL; 2088 } 2089 2090 /** 2091 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization 2092 * @vgpu: a vGPU 2093 * 2094 * This function is used to clean up per-vGPU graphics memory virtualization 2095 * components. 2096 * 2097 * Returns: 2098 * Zero on success, error code if failed. 2099 */ 2100 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) 2101 { 2102 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2103 intel_vgpu_destroy_ggtt_mm(vgpu); 2104 release_scratch_page_tree(vgpu); 2105 } 2106 2107 static void clean_spt_oos(struct intel_gvt *gvt) 2108 { 2109 struct intel_gvt_gtt *gtt = &gvt->gtt; 2110 struct list_head *pos, *n; 2111 struct intel_vgpu_oos_page *oos_page; 2112 2113 WARN(!list_empty(>t->oos_page_use_list_head), 2114 "someone is still using oos page\n"); 2115 2116 list_for_each_safe(pos, n, >t->oos_page_free_list_head) { 2117 oos_page = container_of(pos, struct intel_vgpu_oos_page, list); 2118 list_del(&oos_page->list); 2119 kfree(oos_page); 2120 } 2121 } 2122 2123 static int setup_spt_oos(struct intel_gvt *gvt) 2124 { 2125 struct intel_gvt_gtt *gtt = &gvt->gtt; 2126 struct intel_vgpu_oos_page *oos_page; 2127 int i; 2128 int ret; 2129 2130 INIT_LIST_HEAD(>t->oos_page_free_list_head); 2131 INIT_LIST_HEAD(>t->oos_page_use_list_head); 2132 2133 for (i = 0; i < preallocated_oos_pages; i++) { 2134 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); 2135 if (!oos_page) { 2136 ret = -ENOMEM; 2137 goto fail; 2138 } 2139 2140 INIT_LIST_HEAD(&oos_page->list); 2141 INIT_LIST_HEAD(&oos_page->vm_list); 2142 oos_page->id = i; 2143 list_add_tail(&oos_page->list, >t->oos_page_free_list_head); 2144 } 2145 2146 gvt_dbg_mm("%d oos pages preallocated\n", i); 2147 2148 return 0; 2149 fail: 2150 clean_spt_oos(gvt); 2151 return ret; 2152 } 2153 2154 /** 2155 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object 2156 * @vgpu: a vGPU 2157 * @page_table_level: PPGTT page table level 2158 * @root_entry: PPGTT page table root pointers 2159 * 2160 * This function is used to find a PPGTT mm object from mm object pool 2161 * 2162 * Returns: 2163 * pointer to mm object on success, NULL if failed. 2164 */ 2165 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, 2166 u64 pdps[]) 2167 { 2168 struct intel_vgpu_mm *mm; 2169 struct list_head *pos; 2170 2171 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) { 2172 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2173 2174 switch (mm->ppgtt_mm.root_entry_type) { 2175 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 2176 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0]) 2177 return mm; 2178 break; 2179 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 2180 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps, 2181 sizeof(mm->ppgtt_mm.guest_pdps))) 2182 return mm; 2183 break; 2184 default: 2185 GEM_BUG_ON(1); 2186 } 2187 } 2188 return NULL; 2189 } 2190 2191 /** 2192 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object. 2193 * @vgpu: a vGPU 2194 * @root_entry_type: ppgtt root entry type 2195 * @pdps: guest pdps 2196 * 2197 * This function is used to find or create a PPGTT mm object from a guest. 2198 * 2199 * Returns: 2200 * Zero on success, negative error code if failed. 2201 */ 2202 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, 2203 intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) 2204 { 2205 struct intel_vgpu_mm *mm; 2206 2207 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 2208 if (mm) { 2209 intel_vgpu_mm_get(mm); 2210 } else { 2211 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps); 2212 if (IS_ERR(mm)) 2213 gvt_vgpu_err("fail to create mm\n"); 2214 } 2215 return mm; 2216 } 2217 2218 /** 2219 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object. 2220 * @vgpu: a vGPU 2221 * @pdps: guest pdps 2222 * 2223 * This function is used to find a PPGTT mm object from a guest and destroy it. 2224 * 2225 * Returns: 2226 * Zero on success, negative error code if failed. 2227 */ 2228 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]) 2229 { 2230 struct intel_vgpu_mm *mm; 2231 2232 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 2233 if (!mm) { 2234 gvt_vgpu_err("fail to find ppgtt instance.\n"); 2235 return -EINVAL; 2236 } 2237 intel_vgpu_mm_put(mm); 2238 return 0; 2239 } 2240 2241 /** 2242 * intel_gvt_init_gtt - initialize mm components of a GVT device 2243 * @gvt: GVT device 2244 * 2245 * This function is called at the initialization stage, to initialize 2246 * the mm components of a GVT device. 2247 * 2248 * Returns: 2249 * zero on success, negative error code if failed. 2250 */ 2251 int intel_gvt_init_gtt(struct intel_gvt *gvt) 2252 { 2253 int ret; 2254 void *page; 2255 struct device *dev = &gvt->dev_priv->drm.pdev->dev; 2256 dma_addr_t daddr; 2257 2258 gvt_dbg_core("init gtt\n"); 2259 2260 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) 2261 || IS_KABYLAKE(gvt->dev_priv)) { 2262 gvt->gtt.pte_ops = &gen8_gtt_pte_ops; 2263 gvt->gtt.gma_ops = &gen8_gtt_gma_ops; 2264 } else { 2265 return -ENODEV; 2266 } 2267 2268 page = (void *)get_zeroed_page(GFP_KERNEL); 2269 if (!page) { 2270 gvt_err("fail to allocate scratch ggtt page\n"); 2271 return -ENOMEM; 2272 } 2273 2274 daddr = dma_map_page(dev, virt_to_page(page), 0, 2275 4096, PCI_DMA_BIDIRECTIONAL); 2276 if (dma_mapping_error(dev, daddr)) { 2277 gvt_err("fail to dmamap scratch ggtt page\n"); 2278 __free_page(virt_to_page(page)); 2279 return -ENOMEM; 2280 } 2281 2282 gvt->gtt.scratch_page = virt_to_page(page); 2283 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); 2284 2285 if (enable_out_of_sync) { 2286 ret = setup_spt_oos(gvt); 2287 if (ret) { 2288 gvt_err("fail to initialize SPT oos\n"); 2289 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 2290 __free_page(gvt->gtt.scratch_page); 2291 return ret; 2292 } 2293 } 2294 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); 2295 return 0; 2296 } 2297 2298 /** 2299 * intel_gvt_clean_gtt - clean up mm components of a GVT device 2300 * @gvt: GVT device 2301 * 2302 * This function is called at the driver unloading stage, to clean up the 2303 * the mm components of a GVT device. 2304 * 2305 */ 2306 void intel_gvt_clean_gtt(struct intel_gvt *gvt) 2307 { 2308 struct device *dev = &gvt->dev_priv->drm.pdev->dev; 2309 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << 2310 I915_GTT_PAGE_SHIFT); 2311 2312 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 2313 2314 __free_page(gvt->gtt.scratch_page); 2315 2316 if (enable_out_of_sync) 2317 clean_spt_oos(gvt); 2318 } 2319 2320 /** 2321 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances 2322 * @vgpu: a vGPU 2323 * 2324 * This function is called when invalidate all PPGTT instances of a vGPU. 2325 * 2326 */ 2327 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) 2328 { 2329 struct list_head *pos, *n; 2330 struct intel_vgpu_mm *mm; 2331 2332 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2333 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2334 if (mm->type == INTEL_GVT_MM_PPGTT) { 2335 list_del_init(&mm->ppgtt_mm.lru_list); 2336 if (mm->ppgtt_mm.shadowed) 2337 invalidate_ppgtt_mm(mm); 2338 } 2339 } 2340 } 2341 2342 /** 2343 * intel_vgpu_reset_ggtt - reset the GGTT entry 2344 * @vgpu: a vGPU 2345 * @invalidate_old: invalidate old entries 2346 * 2347 * This function is called at the vGPU create stage 2348 * to reset all the GGTT entries. 2349 * 2350 */ 2351 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) 2352 { 2353 struct intel_gvt *gvt = vgpu->gvt; 2354 struct drm_i915_private *dev_priv = gvt->dev_priv; 2355 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2356 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; 2357 struct intel_gvt_gtt_entry old_entry; 2358 u32 index; 2359 u32 num_entries; 2360 2361 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn); 2362 pte_ops->set_present(&entry); 2363 2364 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; 2365 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; 2366 while (num_entries--) { 2367 if (invalidate_old) { 2368 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2369 ggtt_invalidate_pte(vgpu, &old_entry); 2370 } 2371 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2372 } 2373 2374 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; 2375 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2376 while (num_entries--) { 2377 if (invalidate_old) { 2378 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2379 ggtt_invalidate_pte(vgpu, &old_entry); 2380 } 2381 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2382 } 2383 2384 ggtt_invalidate(dev_priv); 2385 } 2386 2387 /** 2388 * intel_vgpu_reset_gtt - reset the all GTT related status 2389 * @vgpu: a vGPU 2390 * 2391 * This function is called from vfio core to reset reset all 2392 * GTT related status, including GGTT, PPGTT, scratch page. 2393 * 2394 */ 2395 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) 2396 { 2397 /* Shadow pages are only created when there is no page 2398 * table tracking data, so remove page tracking data after 2399 * removing the shadow pages. 2400 */ 2401 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2402 intel_vgpu_reset_ggtt(vgpu, true); 2403 } 2404