1 /* 2 * GTT virtualization 3 * 4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 * SOFTWARE. 24 * 25 * Authors: 26 * Zhi Wang <zhi.a.wang@intel.com> 27 * Zhenyu Wang <zhenyuw@linux.intel.com> 28 * Xiao Zheng <xiao.zheng@intel.com> 29 * 30 * Contributors: 31 * Min He <min.he@intel.com> 32 * Bing Niu <bing.niu@intel.com> 33 * 34 */ 35 36 #include "i915_drv.h" 37 #include "gvt.h" 38 #include "i915_pvinfo.h" 39 #include "trace.h" 40 41 #if defined(VERBOSE_DEBUG) 42 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args) 43 #else 44 #define gvt_vdbg_mm(fmt, args...) 45 #endif 46 47 static bool enable_out_of_sync = false; 48 static int preallocated_oos_pages = 8192; 49 50 /* 51 * validate a gm address and related range size, 52 * translate it to host gm address 53 */ 54 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) 55 { 56 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 57 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 58 gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", 59 addr, size); 60 return false; 61 } 62 return true; 63 } 64 65 /* translate a guest gmadr to host gmadr */ 66 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) 67 { 68 if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr), 69 "invalid guest gmadr %llx\n", g_addr)) 70 return -EACCES; 71 72 if (vgpu_gmadr_is_aperture(vgpu, g_addr)) 73 *h_addr = vgpu_aperture_gmadr_base(vgpu) 74 + (g_addr - vgpu_aperture_offset(vgpu)); 75 else 76 *h_addr = vgpu_hidden_gmadr_base(vgpu) 77 + (g_addr - vgpu_hidden_offset(vgpu)); 78 return 0; 79 } 80 81 /* translate a host gmadr to guest gmadr */ 82 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr) 83 { 84 if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr), 85 "invalid host gmadr %llx\n", h_addr)) 86 return -EACCES; 87 88 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) 89 *g_addr = vgpu_aperture_gmadr_base(vgpu) 90 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt)); 91 else 92 *g_addr = vgpu_hidden_gmadr_base(vgpu) 93 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt)); 94 return 0; 95 } 96 97 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, 98 unsigned long *h_index) 99 { 100 u64 h_addr; 101 int ret; 102 103 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT, 104 &h_addr); 105 if (ret) 106 return ret; 107 108 *h_index = h_addr >> I915_GTT_PAGE_SHIFT; 109 return 0; 110 } 111 112 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 113 unsigned long *g_index) 114 { 115 u64 g_addr; 116 int ret; 117 118 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT, 119 &g_addr); 120 if (ret) 121 return ret; 122 123 *g_index = g_addr >> I915_GTT_PAGE_SHIFT; 124 return 0; 125 } 126 127 #define gtt_type_is_entry(type) \ 128 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \ 129 && type != GTT_TYPE_PPGTT_PTE_ENTRY \ 130 && type != GTT_TYPE_PPGTT_ROOT_ENTRY) 131 132 #define gtt_type_is_pt(type) \ 133 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) 134 135 #define gtt_type_is_pte_pt(type) \ 136 (type == GTT_TYPE_PPGTT_PTE_PT) 137 138 #define gtt_type_is_root_pointer(type) \ 139 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY) 140 141 #define gtt_init_entry(e, t, p, v) do { \ 142 (e)->type = t; \ 143 (e)->pdev = p; \ 144 memcpy(&(e)->val64, &v, sizeof(v)); \ 145 } while (0) 146 147 /* 148 * Mappings between GTT_TYPE* enumerations. 149 * Following information can be found according to the given type: 150 * - type of next level page table 151 * - type of entry inside this level page table 152 * - type of entry with PSE set 153 * 154 * If the given type doesn't have such a kind of information, 155 * e.g. give a l4 root entry type, then request to get its PSE type, 156 * give a PTE page table type, then request to get its next level page 157 * table type, as we know l4 root entry doesn't have a PSE bit, 158 * and a PTE page table doesn't have a next level page table type, 159 * GTT_TYPE_INVALID will be returned. This is useful when traversing a 160 * page table. 161 */ 162 163 struct gtt_type_table_entry { 164 int entry_type; 165 int pt_type; 166 int next_pt_type; 167 int pse_entry_type; 168 }; 169 170 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \ 171 [type] = { \ 172 .entry_type = e_type, \ 173 .pt_type = cpt_type, \ 174 .next_pt_type = npt_type, \ 175 .pse_entry_type = pse_type, \ 176 } 177 178 static struct gtt_type_table_entry gtt_type_table[] = { 179 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY, 180 GTT_TYPE_PPGTT_ROOT_L4_ENTRY, 181 GTT_TYPE_INVALID, 182 GTT_TYPE_PPGTT_PML4_PT, 183 GTT_TYPE_INVALID), 184 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT, 185 GTT_TYPE_PPGTT_PML4_ENTRY, 186 GTT_TYPE_PPGTT_PML4_PT, 187 GTT_TYPE_PPGTT_PDP_PT, 188 GTT_TYPE_INVALID), 189 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY, 190 GTT_TYPE_PPGTT_PML4_ENTRY, 191 GTT_TYPE_PPGTT_PML4_PT, 192 GTT_TYPE_PPGTT_PDP_PT, 193 GTT_TYPE_INVALID), 194 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT, 195 GTT_TYPE_PPGTT_PDP_ENTRY, 196 GTT_TYPE_PPGTT_PDP_PT, 197 GTT_TYPE_PPGTT_PDE_PT, 198 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 199 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY, 200 GTT_TYPE_PPGTT_ROOT_L3_ENTRY, 201 GTT_TYPE_INVALID, 202 GTT_TYPE_PPGTT_PDE_PT, 203 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 204 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY, 205 GTT_TYPE_PPGTT_PDP_ENTRY, 206 GTT_TYPE_PPGTT_PDP_PT, 207 GTT_TYPE_PPGTT_PDE_PT, 208 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 209 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT, 210 GTT_TYPE_PPGTT_PDE_ENTRY, 211 GTT_TYPE_PPGTT_PDE_PT, 212 GTT_TYPE_PPGTT_PTE_PT, 213 GTT_TYPE_PPGTT_PTE_2M_ENTRY), 214 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY, 215 GTT_TYPE_PPGTT_PDE_ENTRY, 216 GTT_TYPE_PPGTT_PDE_PT, 217 GTT_TYPE_PPGTT_PTE_PT, 218 GTT_TYPE_PPGTT_PTE_2M_ENTRY), 219 /* We take IPS bit as 'PSE' for PTE level. */ 220 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT, 221 GTT_TYPE_PPGTT_PTE_4K_ENTRY, 222 GTT_TYPE_PPGTT_PTE_PT, 223 GTT_TYPE_INVALID, 224 GTT_TYPE_PPGTT_PTE_64K_ENTRY), 225 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY, 226 GTT_TYPE_PPGTT_PTE_4K_ENTRY, 227 GTT_TYPE_PPGTT_PTE_PT, 228 GTT_TYPE_INVALID, 229 GTT_TYPE_PPGTT_PTE_64K_ENTRY), 230 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY, 231 GTT_TYPE_PPGTT_PTE_4K_ENTRY, 232 GTT_TYPE_PPGTT_PTE_PT, 233 GTT_TYPE_INVALID, 234 GTT_TYPE_PPGTT_PTE_64K_ENTRY), 235 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY, 236 GTT_TYPE_PPGTT_PDE_ENTRY, 237 GTT_TYPE_PPGTT_PDE_PT, 238 GTT_TYPE_INVALID, 239 GTT_TYPE_PPGTT_PTE_2M_ENTRY), 240 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY, 241 GTT_TYPE_PPGTT_PDP_ENTRY, 242 GTT_TYPE_PPGTT_PDP_PT, 243 GTT_TYPE_INVALID, 244 GTT_TYPE_PPGTT_PTE_1G_ENTRY), 245 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE, 246 GTT_TYPE_GGTT_PTE, 247 GTT_TYPE_INVALID, 248 GTT_TYPE_INVALID, 249 GTT_TYPE_INVALID), 250 }; 251 252 static inline int get_next_pt_type(int type) 253 { 254 return gtt_type_table[type].next_pt_type; 255 } 256 257 static inline int get_pt_type(int type) 258 { 259 return gtt_type_table[type].pt_type; 260 } 261 262 static inline int get_entry_type(int type) 263 { 264 return gtt_type_table[type].entry_type; 265 } 266 267 static inline int get_pse_type(int type) 268 { 269 return gtt_type_table[type].pse_entry_type; 270 } 271 272 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 273 { 274 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 275 276 return readq(addr); 277 } 278 279 static void ggtt_invalidate(struct drm_i915_private *dev_priv) 280 { 281 mmio_hw_access_pre(dev_priv); 282 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 283 mmio_hw_access_post(dev_priv); 284 } 285 286 static void write_pte64(struct drm_i915_private *dev_priv, 287 unsigned long index, u64 pte) 288 { 289 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 290 291 writeq(pte, addr); 292 } 293 294 static inline int gtt_get_entry64(void *pt, 295 struct intel_gvt_gtt_entry *e, 296 unsigned long index, bool hypervisor_access, unsigned long gpa, 297 struct intel_vgpu *vgpu) 298 { 299 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 300 int ret; 301 302 if (WARN_ON(info->gtt_entry_size != 8)) 303 return -EINVAL; 304 305 if (hypervisor_access) { 306 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + 307 (index << info->gtt_entry_size_shift), 308 &e->val64, 8); 309 if (WARN_ON(ret)) 310 return ret; 311 } else if (!pt) { 312 e->val64 = read_pte64(vgpu->gvt->dev_priv, index); 313 } else { 314 e->val64 = *((u64 *)pt + index); 315 } 316 return 0; 317 } 318 319 static inline int gtt_set_entry64(void *pt, 320 struct intel_gvt_gtt_entry *e, 321 unsigned long index, bool hypervisor_access, unsigned long gpa, 322 struct intel_vgpu *vgpu) 323 { 324 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 325 int ret; 326 327 if (WARN_ON(info->gtt_entry_size != 8)) 328 return -EINVAL; 329 330 if (hypervisor_access) { 331 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa + 332 (index << info->gtt_entry_size_shift), 333 &e->val64, 8); 334 if (WARN_ON(ret)) 335 return ret; 336 } else if (!pt) { 337 write_pte64(vgpu->gvt->dev_priv, index, e->val64); 338 } else { 339 *((u64 *)pt + index) = e->val64; 340 } 341 return 0; 342 } 343 344 #define GTT_HAW 46 345 346 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30) 347 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21) 348 #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16) 349 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12) 350 351 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52) 352 #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */ 353 354 #define GTT_64K_PTE_STRIDE 16 355 356 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) 357 { 358 unsigned long pfn; 359 360 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) 361 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; 362 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) 363 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; 364 else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) 365 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT; 366 else 367 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; 368 return pfn; 369 } 370 371 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) 372 { 373 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { 374 e->val64 &= ~ADDR_1G_MASK; 375 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT); 376 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { 377 e->val64 &= ~ADDR_2M_MASK; 378 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT); 379 } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) { 380 e->val64 &= ~ADDR_64K_MASK; 381 pfn &= (ADDR_64K_MASK >> PAGE_SHIFT); 382 } else { 383 e->val64 &= ~ADDR_4K_MASK; 384 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT); 385 } 386 387 e->val64 |= (pfn << PAGE_SHIFT); 388 } 389 390 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) 391 { 392 return !!(e->val64 & _PAGE_PSE); 393 } 394 395 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e) 396 { 397 if (gen8_gtt_test_pse(e)) { 398 switch (e->type) { 399 case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 400 e->val64 &= ~_PAGE_PSE; 401 e->type = GTT_TYPE_PPGTT_PDE_ENTRY; 402 break; 403 case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 404 e->type = GTT_TYPE_PPGTT_PDP_ENTRY; 405 e->val64 &= ~_PAGE_PSE; 406 break; 407 default: 408 WARN_ON(1); 409 } 410 } 411 } 412 413 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e) 414 { 415 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) 416 return false; 417 418 return !!(e->val64 & GEN8_PDE_IPS_64K); 419 } 420 421 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e) 422 { 423 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) 424 return; 425 426 e->val64 &= ~GEN8_PDE_IPS_64K; 427 } 428 429 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) 430 { 431 /* 432 * i915 writes PDP root pointer registers without present bit, 433 * it also works, so we need to treat root pointer entry 434 * specifically. 435 */ 436 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY 437 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) 438 return (e->val64 != 0); 439 else 440 return (e->val64 & _PAGE_PRESENT); 441 } 442 443 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) 444 { 445 e->val64 &= ~_PAGE_PRESENT; 446 } 447 448 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) 449 { 450 e->val64 |= _PAGE_PRESENT; 451 } 452 453 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) 454 { 455 return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED); 456 } 457 458 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e) 459 { 460 e->val64 |= GTT_SPTE_FLAG_64K_SPLITED; 461 } 462 463 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e) 464 { 465 e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED; 466 } 467 468 /* 469 * Per-platform GMA routines. 470 */ 471 static unsigned long gma_to_ggtt_pte_index(unsigned long gma) 472 { 473 unsigned long x = (gma >> I915_GTT_PAGE_SHIFT); 474 475 trace_gma_index(__func__, gma, x); 476 return x; 477 } 478 479 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \ 480 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \ 481 { \ 482 unsigned long x = (exp); \ 483 trace_gma_index(__func__, gma, x); \ 484 return x; \ 485 } 486 487 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff)); 488 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff)); 489 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3)); 490 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); 491 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); 492 493 static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { 494 .get_entry = gtt_get_entry64, 495 .set_entry = gtt_set_entry64, 496 .clear_present = gtt_entry_clear_present, 497 .set_present = gtt_entry_set_present, 498 .test_present = gen8_gtt_test_present, 499 .test_pse = gen8_gtt_test_pse, 500 .clear_pse = gen8_gtt_clear_pse, 501 .clear_ips = gen8_gtt_clear_ips, 502 .test_ips = gen8_gtt_test_ips, 503 .clear_64k_splited = gen8_gtt_clear_64k_splited, 504 .set_64k_splited = gen8_gtt_set_64k_splited, 505 .test_64k_splited = gen8_gtt_test_64k_splited, 506 .get_pfn = gen8_gtt_get_pfn, 507 .set_pfn = gen8_gtt_set_pfn, 508 }; 509 510 static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { 511 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index, 512 .gma_to_pte_index = gen8_gma_to_pte_index, 513 .gma_to_pde_index = gen8_gma_to_pde_index, 514 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index, 515 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index, 516 .gma_to_pml4_index = gen8_gma_to_pml4_index, 517 }; 518 519 /* Update entry type per pse and ips bit. */ 520 static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops, 521 struct intel_gvt_gtt_entry *entry, bool ips) 522 { 523 switch (entry->type) { 524 case GTT_TYPE_PPGTT_PDE_ENTRY: 525 case GTT_TYPE_PPGTT_PDP_ENTRY: 526 if (pte_ops->test_pse(entry)) 527 entry->type = get_pse_type(entry->type); 528 break; 529 case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 530 if (ips) 531 entry->type = get_pse_type(entry->type); 532 break; 533 default: 534 GEM_BUG_ON(!gtt_type_is_entry(entry->type)); 535 } 536 537 GEM_BUG_ON(entry->type == GTT_TYPE_INVALID); 538 } 539 540 /* 541 * MM helpers. 542 */ 543 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, 544 struct intel_gvt_gtt_entry *entry, unsigned long index, 545 bool guest) 546 { 547 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 548 549 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); 550 551 entry->type = mm->ppgtt_mm.root_entry_type; 552 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : 553 mm->ppgtt_mm.shadow_pdps, 554 entry, index, false, 0, mm->vgpu); 555 update_entry_type_for_real(pte_ops, entry, false); 556 } 557 558 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, 559 struct intel_gvt_gtt_entry *entry, unsigned long index) 560 { 561 _ppgtt_get_root_entry(mm, entry, index, true); 562 } 563 564 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm, 565 struct intel_gvt_gtt_entry *entry, unsigned long index) 566 { 567 _ppgtt_get_root_entry(mm, entry, index, false); 568 } 569 570 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm, 571 struct intel_gvt_gtt_entry *entry, unsigned long index, 572 bool guest) 573 { 574 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 575 576 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps : 577 mm->ppgtt_mm.shadow_pdps, 578 entry, index, false, 0, mm->vgpu); 579 } 580 581 static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm, 582 struct intel_gvt_gtt_entry *entry, unsigned long index) 583 { 584 _ppgtt_set_root_entry(mm, entry, index, true); 585 } 586 587 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm, 588 struct intel_gvt_gtt_entry *entry, unsigned long index) 589 { 590 _ppgtt_set_root_entry(mm, entry, index, false); 591 } 592 593 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, 594 struct intel_gvt_gtt_entry *entry, unsigned long index) 595 { 596 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 597 598 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 599 600 entry->type = GTT_TYPE_GGTT_PTE; 601 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index, 602 false, 0, mm->vgpu); 603 } 604 605 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, 606 struct intel_gvt_gtt_entry *entry, unsigned long index) 607 { 608 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 609 610 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 611 612 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index, 613 false, 0, mm->vgpu); 614 } 615 616 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, 617 struct intel_gvt_gtt_entry *entry, unsigned long index) 618 { 619 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 620 621 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 622 623 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); 624 } 625 626 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, 627 struct intel_gvt_gtt_entry *entry, unsigned long index) 628 { 629 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 630 631 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 632 633 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); 634 } 635 636 /* 637 * PPGTT shadow page table helpers. 638 */ 639 static inline int ppgtt_spt_get_entry( 640 struct intel_vgpu_ppgtt_spt *spt, 641 void *page_table, int type, 642 struct intel_gvt_gtt_entry *e, unsigned long index, 643 bool guest) 644 { 645 struct intel_gvt *gvt = spt->vgpu->gvt; 646 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 647 int ret; 648 649 e->type = get_entry_type(type); 650 651 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 652 return -EINVAL; 653 654 ret = ops->get_entry(page_table, e, index, guest, 655 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 656 spt->vgpu); 657 if (ret) 658 return ret; 659 660 update_entry_type_for_real(ops, e, guest ? 661 spt->guest_page.pde_ips : false); 662 663 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", 664 type, e->type, index, e->val64); 665 return 0; 666 } 667 668 static inline int ppgtt_spt_set_entry( 669 struct intel_vgpu_ppgtt_spt *spt, 670 void *page_table, int type, 671 struct intel_gvt_gtt_entry *e, unsigned long index, 672 bool guest) 673 { 674 struct intel_gvt *gvt = spt->vgpu->gvt; 675 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 676 677 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) 678 return -EINVAL; 679 680 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", 681 type, e->type, index, e->val64); 682 683 return ops->set_entry(page_table, e, index, guest, 684 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 685 spt->vgpu); 686 } 687 688 #define ppgtt_get_guest_entry(spt, e, index) \ 689 ppgtt_spt_get_entry(spt, NULL, \ 690 spt->guest_page.type, e, index, true) 691 692 #define ppgtt_set_guest_entry(spt, e, index) \ 693 ppgtt_spt_set_entry(spt, NULL, \ 694 spt->guest_page.type, e, index, true) 695 696 #define ppgtt_get_shadow_entry(spt, e, index) \ 697 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ 698 spt->shadow_page.type, e, index, false) 699 700 #define ppgtt_set_shadow_entry(spt, e, index) \ 701 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ 702 spt->shadow_page.type, e, index, false) 703 704 static void *alloc_spt(gfp_t gfp_mask) 705 { 706 struct intel_vgpu_ppgtt_spt *spt; 707 708 spt = kzalloc(sizeof(*spt), gfp_mask); 709 if (!spt) 710 return NULL; 711 712 spt->shadow_page.page = alloc_page(gfp_mask); 713 if (!spt->shadow_page.page) { 714 kfree(spt); 715 return NULL; 716 } 717 return spt; 718 } 719 720 static void free_spt(struct intel_vgpu_ppgtt_spt *spt) 721 { 722 __free_page(spt->shadow_page.page); 723 kfree(spt); 724 } 725 726 static int detach_oos_page(struct intel_vgpu *vgpu, 727 struct intel_vgpu_oos_page *oos_page); 728 729 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) 730 { 731 struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev; 732 733 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); 734 735 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, 736 PCI_DMA_BIDIRECTIONAL); 737 738 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); 739 740 if (spt->guest_page.gfn) { 741 if (spt->guest_page.oos_page) 742 detach_oos_page(spt->vgpu, spt->guest_page.oos_page); 743 744 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); 745 } 746 747 list_del_init(&spt->post_shadow_list); 748 free_spt(spt); 749 } 750 751 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) 752 { 753 struct intel_vgpu_ppgtt_spt *spt; 754 struct radix_tree_iter iter; 755 void **slot; 756 757 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { 758 spt = radix_tree_deref_slot(slot); 759 ppgtt_free_spt(spt); 760 } 761 } 762 763 static int ppgtt_handle_guest_write_page_table_bytes( 764 struct intel_vgpu_ppgtt_spt *spt, 765 u64 pa, void *p_data, int bytes); 766 767 static int ppgtt_write_protection_handler( 768 struct intel_vgpu_page_track *page_track, 769 u64 gpa, void *data, int bytes) 770 { 771 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data; 772 773 int ret; 774 775 if (bytes != 4 && bytes != 8) 776 return -EINVAL; 777 778 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes); 779 if (ret) 780 return ret; 781 return ret; 782 } 783 784 /* Find a spt by guest gfn. */ 785 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn( 786 struct intel_vgpu *vgpu, unsigned long gfn) 787 { 788 struct intel_vgpu_page_track *track; 789 790 track = intel_vgpu_find_page_track(vgpu, gfn); 791 if (track && track->handler == ppgtt_write_protection_handler) 792 return track->priv_data; 793 794 return NULL; 795 } 796 797 /* Find the spt by shadow page mfn. */ 798 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( 799 struct intel_vgpu *vgpu, unsigned long mfn) 800 { 801 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn); 802 } 803 804 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); 805 806 /* Allocate shadow page table without guest page. */ 807 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( 808 struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) 809 { 810 struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; 811 struct intel_vgpu_ppgtt_spt *spt = NULL; 812 dma_addr_t daddr; 813 int ret; 814 815 retry: 816 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO); 817 if (!spt) { 818 if (reclaim_one_ppgtt_mm(vgpu->gvt)) 819 goto retry; 820 821 gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); 822 return ERR_PTR(-ENOMEM); 823 } 824 825 spt->vgpu = vgpu; 826 atomic_set(&spt->refcount, 1); 827 INIT_LIST_HEAD(&spt->post_shadow_list); 828 829 /* 830 * Init shadow_page. 831 */ 832 spt->shadow_page.type = type; 833 daddr = dma_map_page(kdev, spt->shadow_page.page, 834 0, 4096, PCI_DMA_BIDIRECTIONAL); 835 if (dma_mapping_error(kdev, daddr)) { 836 gvt_vgpu_err("fail to map dma addr\n"); 837 ret = -EINVAL; 838 goto err_free_spt; 839 } 840 spt->shadow_page.vaddr = page_address(spt->shadow_page.page); 841 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; 842 843 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); 844 if (ret) 845 goto err_unmap_dma; 846 847 return spt; 848 849 err_unmap_dma: 850 dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 851 err_free_spt: 852 free_spt(spt); 853 return ERR_PTR(ret); 854 } 855 856 /* Allocate shadow page table associated with specific gfn. */ 857 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn( 858 struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type, 859 unsigned long gfn, bool guest_pde_ips) 860 { 861 struct intel_vgpu_ppgtt_spt *spt; 862 int ret; 863 864 spt = ppgtt_alloc_spt(vgpu, type); 865 if (IS_ERR(spt)) 866 return spt; 867 868 /* 869 * Init guest_page. 870 */ 871 ret = intel_vgpu_register_page_track(vgpu, gfn, 872 ppgtt_write_protection_handler, spt); 873 if (ret) { 874 ppgtt_free_spt(spt); 875 return ERR_PTR(ret); 876 } 877 878 spt->guest_page.type = type; 879 spt->guest_page.gfn = gfn; 880 spt->guest_page.pde_ips = guest_pde_ips; 881 882 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); 883 884 return spt; 885 } 886 887 #define pt_entry_size_shift(spt) \ 888 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) 889 890 #define pt_entries(spt) \ 891 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) 892 893 #define for_each_present_guest_entry(spt, e, i) \ 894 for (i = 0; i < pt_entries(spt); \ 895 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \ 896 if (!ppgtt_get_guest_entry(spt, e, i) && \ 897 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 898 899 #define for_each_present_shadow_entry(spt, e, i) \ 900 for (i = 0; i < pt_entries(spt); \ 901 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \ 902 if (!ppgtt_get_shadow_entry(spt, e, i) && \ 903 spt->vgpu->gvt->gtt.pte_ops->test_present(e)) 904 905 #define for_each_shadow_entry(spt, e, i) \ 906 for (i = 0; i < pt_entries(spt); \ 907 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \ 908 if (!ppgtt_get_shadow_entry(spt, e, i)) 909 910 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) 911 { 912 int v = atomic_read(&spt->refcount); 913 914 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); 915 atomic_inc(&spt->refcount); 916 } 917 918 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt) 919 { 920 int v = atomic_read(&spt->refcount); 921 922 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); 923 return atomic_dec_return(&spt->refcount); 924 } 925 926 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); 927 928 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, 929 struct intel_gvt_gtt_entry *e) 930 { 931 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 932 struct intel_vgpu_ppgtt_spt *s; 933 intel_gvt_gtt_type_t cur_pt_type; 934 935 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type))); 936 937 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY 938 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 939 cur_pt_type = get_next_pt_type(e->type) + 1; 940 if (ops->get_pfn(e) == 941 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) 942 return 0; 943 } 944 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 945 if (!s) { 946 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", 947 ops->get_pfn(e)); 948 return -ENXIO; 949 } 950 return ppgtt_invalidate_spt(s); 951 } 952 953 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, 954 struct intel_gvt_gtt_entry *entry) 955 { 956 struct intel_vgpu *vgpu = spt->vgpu; 957 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 958 unsigned long pfn; 959 int type; 960 961 pfn = ops->get_pfn(entry); 962 type = spt->shadow_page.type; 963 964 /* Uninitialized spte or unshadowed spte. */ 965 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) 966 return; 967 968 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); 969 } 970 971 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) 972 { 973 struct intel_vgpu *vgpu = spt->vgpu; 974 struct intel_gvt_gtt_entry e; 975 unsigned long index; 976 int ret; 977 978 trace_spt_change(spt->vgpu->id, "die", spt, 979 spt->guest_page.gfn, spt->shadow_page.type); 980 981 if (ppgtt_put_spt(spt) > 0) 982 return 0; 983 984 for_each_present_shadow_entry(spt, &e, index) { 985 switch (e.type) { 986 case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 987 gvt_vdbg_mm("invalidate 4K entry\n"); 988 ppgtt_invalidate_pte(spt, &e); 989 break; 990 case GTT_TYPE_PPGTT_PTE_64K_ENTRY: 991 /* We don't setup 64K shadow entry so far. */ 992 WARN(1, "suspicious 64K gtt entry\n"); 993 continue; 994 case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 995 gvt_vdbg_mm("invalidate 2M entry\n"); 996 continue; 997 case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 998 WARN(1, "GVT doesn't support 1GB page\n"); 999 continue; 1000 case GTT_TYPE_PPGTT_PML4_ENTRY: 1001 case GTT_TYPE_PPGTT_PDP_ENTRY: 1002 case GTT_TYPE_PPGTT_PDE_ENTRY: 1003 gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n"); 1004 ret = ppgtt_invalidate_spt_by_shadow_entry( 1005 spt->vgpu, &e); 1006 if (ret) 1007 goto fail; 1008 break; 1009 default: 1010 GEM_BUG_ON(1); 1011 } 1012 } 1013 1014 trace_spt_change(spt->vgpu->id, "release", spt, 1015 spt->guest_page.gfn, spt->shadow_page.type); 1016 ppgtt_free_spt(spt); 1017 return 0; 1018 fail: 1019 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", 1020 spt, e.val64, e.type); 1021 return ret; 1022 } 1023 1024 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu) 1025 { 1026 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1027 1028 if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) { 1029 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) & 1030 GAMW_ECO_ENABLE_64K_IPS_FIELD; 1031 1032 return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD; 1033 } else if (INTEL_GEN(dev_priv) >= 11) { 1034 /* 64K paging only controlled by IPS bit in PTE now. */ 1035 return true; 1036 } else 1037 return false; 1038 } 1039 1040 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); 1041 1042 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( 1043 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) 1044 { 1045 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1046 struct intel_vgpu_ppgtt_spt *spt = NULL; 1047 bool ips = false; 1048 int ret; 1049 1050 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); 1051 1052 if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) 1053 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); 1054 1055 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); 1056 if (spt) { 1057 ppgtt_get_spt(spt); 1058 1059 if (ips != spt->guest_page.pde_ips) { 1060 spt->guest_page.pde_ips = ips; 1061 1062 gvt_dbg_mm("reshadow PDE since ips changed\n"); 1063 clear_page(spt->shadow_page.vaddr); 1064 ret = ppgtt_populate_spt(spt); 1065 if (ret) { 1066 ppgtt_put_spt(spt); 1067 goto err; 1068 } 1069 } 1070 } else { 1071 int type = get_next_pt_type(we->type); 1072 1073 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); 1074 if (IS_ERR(spt)) { 1075 ret = PTR_ERR(spt); 1076 goto err; 1077 } 1078 1079 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); 1080 if (ret) 1081 goto err_free_spt; 1082 1083 ret = ppgtt_populate_spt(spt); 1084 if (ret) 1085 goto err_free_spt; 1086 1087 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, 1088 spt->shadow_page.type); 1089 } 1090 return spt; 1091 1092 err_free_spt: 1093 ppgtt_free_spt(spt); 1094 err: 1095 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1096 spt, we->val64, we->type); 1097 return ERR_PTR(ret); 1098 } 1099 1100 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, 1101 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge) 1102 { 1103 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; 1104 1105 se->type = ge->type; 1106 se->val64 = ge->val64; 1107 1108 /* Because we always split 64KB pages, so clear IPS in shadow PDE. */ 1109 if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY) 1110 ops->clear_ips(se); 1111 1112 ops->set_pfn(se, s->shadow_page.mfn); 1113 } 1114 1115 /** 1116 * Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition, 1117 * negtive if found err. 1118 */ 1119 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, 1120 struct intel_gvt_gtt_entry *entry) 1121 { 1122 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1123 unsigned long pfn; 1124 1125 if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M)) 1126 return 0; 1127 1128 pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); 1129 if (pfn == INTEL_GVT_INVALID_ADDR) 1130 return -EINVAL; 1131 1132 return PageTransHuge(pfn_to_page(pfn)); 1133 } 1134 1135 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, 1136 struct intel_vgpu_ppgtt_spt *spt, unsigned long index, 1137 struct intel_gvt_gtt_entry *se) 1138 { 1139 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1140 struct intel_vgpu_ppgtt_spt *sub_spt; 1141 struct intel_gvt_gtt_entry sub_se; 1142 unsigned long start_gfn; 1143 dma_addr_t dma_addr; 1144 unsigned long sub_index; 1145 int ret; 1146 1147 gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index); 1148 1149 start_gfn = ops->get_pfn(se); 1150 1151 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT); 1152 if (IS_ERR(sub_spt)) 1153 return PTR_ERR(sub_spt); 1154 1155 for_each_shadow_entry(sub_spt, &sub_se, sub_index) { 1156 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, 1157 start_gfn + sub_index, PAGE_SIZE, &dma_addr); 1158 if (ret) { 1159 ppgtt_invalidate_spt(spt); 1160 return ret; 1161 } 1162 sub_se.val64 = se->val64; 1163 1164 /* Copy the PAT field from PDE. */ 1165 sub_se.val64 &= ~_PAGE_PAT; 1166 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5; 1167 1168 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT); 1169 ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index); 1170 } 1171 1172 /* Clear dirty field. */ 1173 se->val64 &= ~_PAGE_DIRTY; 1174 1175 ops->clear_pse(se); 1176 ops->clear_ips(se); 1177 ops->set_pfn(se, sub_spt->shadow_page.mfn); 1178 ppgtt_set_shadow_entry(spt, se, index); 1179 return 0; 1180 } 1181 1182 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, 1183 struct intel_vgpu_ppgtt_spt *spt, unsigned long index, 1184 struct intel_gvt_gtt_entry *se) 1185 { 1186 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1187 struct intel_gvt_gtt_entry entry = *se; 1188 unsigned long start_gfn; 1189 dma_addr_t dma_addr; 1190 int i, ret; 1191 1192 gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index); 1193 1194 GEM_BUG_ON(index % GTT_64K_PTE_STRIDE); 1195 1196 start_gfn = ops->get_pfn(se); 1197 1198 entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY; 1199 ops->set_64k_splited(&entry); 1200 1201 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { 1202 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, 1203 start_gfn + i, PAGE_SIZE, &dma_addr); 1204 if (ret) 1205 return ret; 1206 1207 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT); 1208 ppgtt_set_shadow_entry(spt, &entry, index + i); 1209 } 1210 return 0; 1211 } 1212 1213 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, 1214 struct intel_vgpu_ppgtt_spt *spt, unsigned long index, 1215 struct intel_gvt_gtt_entry *ge) 1216 { 1217 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 1218 struct intel_gvt_gtt_entry se = *ge; 1219 unsigned long gfn, page_size = PAGE_SIZE; 1220 dma_addr_t dma_addr; 1221 int ret; 1222 1223 if (!pte_ops->test_present(ge)) 1224 return 0; 1225 1226 gfn = pte_ops->get_pfn(ge); 1227 1228 switch (ge->type) { 1229 case GTT_TYPE_PPGTT_PTE_4K_ENTRY: 1230 gvt_vdbg_mm("shadow 4K gtt entry\n"); 1231 break; 1232 case GTT_TYPE_PPGTT_PTE_64K_ENTRY: 1233 gvt_vdbg_mm("shadow 64K gtt entry\n"); 1234 /* 1235 * The layout of 64K page is special, the page size is 1236 * controlled by uper PDE. To be simple, we always split 1237 * 64K page to smaller 4K pages in shadow PT. 1238 */ 1239 return split_64KB_gtt_entry(vgpu, spt, index, &se); 1240 case GTT_TYPE_PPGTT_PTE_2M_ENTRY: 1241 gvt_vdbg_mm("shadow 2M gtt entry\n"); 1242 ret = is_2MB_gtt_possible(vgpu, ge); 1243 if (ret == 0) 1244 return split_2MB_gtt_entry(vgpu, spt, index, &se); 1245 else if (ret < 0) 1246 return ret; 1247 page_size = I915_GTT_PAGE_SIZE_2M; 1248 break; 1249 case GTT_TYPE_PPGTT_PTE_1G_ENTRY: 1250 gvt_vgpu_err("GVT doesn't support 1GB entry\n"); 1251 return -EINVAL; 1252 default: 1253 GEM_BUG_ON(1); 1254 }; 1255 1256 /* direct shadow */ 1257 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, 1258 &dma_addr); 1259 if (ret) 1260 return -ENXIO; 1261 1262 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT); 1263 ppgtt_set_shadow_entry(spt, &se, index); 1264 return 0; 1265 } 1266 1267 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) 1268 { 1269 struct intel_vgpu *vgpu = spt->vgpu; 1270 struct intel_gvt *gvt = vgpu->gvt; 1271 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1272 struct intel_vgpu_ppgtt_spt *s; 1273 struct intel_gvt_gtt_entry se, ge; 1274 unsigned long gfn, i; 1275 int ret; 1276 1277 trace_spt_change(spt->vgpu->id, "born", spt, 1278 spt->guest_page.gfn, spt->shadow_page.type); 1279 1280 for_each_present_guest_entry(spt, &ge, i) { 1281 if (gtt_type_is_pt(get_next_pt_type(ge.type))) { 1282 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); 1283 if (IS_ERR(s)) { 1284 ret = PTR_ERR(s); 1285 goto fail; 1286 } 1287 ppgtt_get_shadow_entry(spt, &se, i); 1288 ppgtt_generate_shadow_entry(&se, s, &ge); 1289 ppgtt_set_shadow_entry(spt, &se, i); 1290 } else { 1291 gfn = ops->get_pfn(&ge); 1292 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { 1293 ops->set_pfn(&se, gvt->gtt.scratch_mfn); 1294 ppgtt_set_shadow_entry(spt, &se, i); 1295 continue; 1296 } 1297 1298 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge); 1299 if (ret) 1300 goto fail; 1301 } 1302 } 1303 return 0; 1304 fail: 1305 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1306 spt, ge.val64, ge.type); 1307 return ret; 1308 } 1309 1310 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, 1311 struct intel_gvt_gtt_entry *se, unsigned long index) 1312 { 1313 struct intel_vgpu *vgpu = spt->vgpu; 1314 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1315 int ret; 1316 1317 trace_spt_guest_change(spt->vgpu->id, "remove", spt, 1318 spt->shadow_page.type, se->val64, index); 1319 1320 gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n", 1321 se->type, index, se->val64); 1322 1323 if (!ops->test_present(se)) 1324 return 0; 1325 1326 if (ops->get_pfn(se) == 1327 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn) 1328 return 0; 1329 1330 if (gtt_type_is_pt(get_next_pt_type(se->type))) { 1331 struct intel_vgpu_ppgtt_spt *s = 1332 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se)); 1333 if (!s) { 1334 gvt_vgpu_err("fail to find guest page\n"); 1335 ret = -ENXIO; 1336 goto fail; 1337 } 1338 ret = ppgtt_invalidate_spt(s); 1339 if (ret) 1340 goto fail; 1341 } else { 1342 /* We don't setup 64K shadow entry so far. */ 1343 WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY, 1344 "suspicious 64K entry\n"); 1345 ppgtt_invalidate_pte(spt, se); 1346 } 1347 1348 return 0; 1349 fail: 1350 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1351 spt, se->val64, se->type); 1352 return ret; 1353 } 1354 1355 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, 1356 struct intel_gvt_gtt_entry *we, unsigned long index) 1357 { 1358 struct intel_vgpu *vgpu = spt->vgpu; 1359 struct intel_gvt_gtt_entry m; 1360 struct intel_vgpu_ppgtt_spt *s; 1361 int ret; 1362 1363 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type, 1364 we->val64, index); 1365 1366 gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n", 1367 we->type, index, we->val64); 1368 1369 if (gtt_type_is_pt(get_next_pt_type(we->type))) { 1370 s = ppgtt_populate_spt_by_guest_entry(vgpu, we); 1371 if (IS_ERR(s)) { 1372 ret = PTR_ERR(s); 1373 goto fail; 1374 } 1375 ppgtt_get_shadow_entry(spt, &m, index); 1376 ppgtt_generate_shadow_entry(&m, s, we); 1377 ppgtt_set_shadow_entry(spt, &m, index); 1378 } else { 1379 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we); 1380 if (ret) 1381 goto fail; 1382 } 1383 return 0; 1384 fail: 1385 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", 1386 spt, we->val64, we->type); 1387 return ret; 1388 } 1389 1390 static int sync_oos_page(struct intel_vgpu *vgpu, 1391 struct intel_vgpu_oos_page *oos_page) 1392 { 1393 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1394 struct intel_gvt *gvt = vgpu->gvt; 1395 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1396 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; 1397 struct intel_gvt_gtt_entry old, new; 1398 int index; 1399 int ret; 1400 1401 trace_oos_change(vgpu->id, "sync", oos_page->id, 1402 spt, spt->guest_page.type); 1403 1404 old.type = new.type = get_entry_type(spt->guest_page.type); 1405 old.val64 = new.val64 = 0; 1406 1407 for (index = 0; index < (I915_GTT_PAGE_SIZE >> 1408 info->gtt_entry_size_shift); index++) { 1409 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); 1410 ops->get_entry(NULL, &new, index, true, 1411 spt->guest_page.gfn << PAGE_SHIFT, vgpu); 1412 1413 if (old.val64 == new.val64 1414 && !test_and_clear_bit(index, spt->post_shadow_bitmap)) 1415 continue; 1416 1417 trace_oos_sync(vgpu->id, oos_page->id, 1418 spt, spt->guest_page.type, 1419 new.val64, index); 1420 1421 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new); 1422 if (ret) 1423 return ret; 1424 1425 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); 1426 } 1427 1428 spt->guest_page.write_cnt = 0; 1429 list_del_init(&spt->post_shadow_list); 1430 return 0; 1431 } 1432 1433 static int detach_oos_page(struct intel_vgpu *vgpu, 1434 struct intel_vgpu_oos_page *oos_page) 1435 { 1436 struct intel_gvt *gvt = vgpu->gvt; 1437 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; 1438 1439 trace_oos_change(vgpu->id, "detach", oos_page->id, 1440 spt, spt->guest_page.type); 1441 1442 spt->guest_page.write_cnt = 0; 1443 spt->guest_page.oos_page = NULL; 1444 oos_page->spt = NULL; 1445 1446 list_del_init(&oos_page->vm_list); 1447 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); 1448 1449 return 0; 1450 } 1451 1452 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, 1453 struct intel_vgpu_ppgtt_spt *spt) 1454 { 1455 struct intel_gvt *gvt = spt->vgpu->gvt; 1456 int ret; 1457 1458 ret = intel_gvt_hypervisor_read_gpa(spt->vgpu, 1459 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, 1460 oos_page->mem, I915_GTT_PAGE_SIZE); 1461 if (ret) 1462 return ret; 1463 1464 oos_page->spt = spt; 1465 spt->guest_page.oos_page = oos_page; 1466 1467 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); 1468 1469 trace_oos_change(spt->vgpu->id, "attach", oos_page->id, 1470 spt, spt->guest_page.type); 1471 return 0; 1472 } 1473 1474 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt) 1475 { 1476 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1477 int ret; 1478 1479 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn); 1480 if (ret) 1481 return ret; 1482 1483 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id, 1484 spt, spt->guest_page.type); 1485 1486 list_del_init(&oos_page->vm_list); 1487 return sync_oos_page(spt->vgpu, oos_page); 1488 } 1489 1490 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt) 1491 { 1492 struct intel_gvt *gvt = spt->vgpu->gvt; 1493 struct intel_gvt_gtt *gtt = &gvt->gtt; 1494 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1495 int ret; 1496 1497 WARN(oos_page, "shadow PPGTT page has already has a oos page\n"); 1498 1499 if (list_empty(>t->oos_page_free_list_head)) { 1500 oos_page = container_of(gtt->oos_page_use_list_head.next, 1501 struct intel_vgpu_oos_page, list); 1502 ret = ppgtt_set_guest_page_sync(oos_page->spt); 1503 if (ret) 1504 return ret; 1505 ret = detach_oos_page(spt->vgpu, oos_page); 1506 if (ret) 1507 return ret; 1508 } else 1509 oos_page = container_of(gtt->oos_page_free_list_head.next, 1510 struct intel_vgpu_oos_page, list); 1511 return attach_oos_page(oos_page, spt); 1512 } 1513 1514 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt) 1515 { 1516 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; 1517 1518 if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n")) 1519 return -EINVAL; 1520 1521 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id, 1522 spt, spt->guest_page.type); 1523 1524 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head); 1525 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn); 1526 } 1527 1528 /** 1529 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU 1530 * @vgpu: a vGPU 1531 * 1532 * This function is called before submitting a guest workload to host, 1533 * to sync all the out-of-synced shadow for vGPU 1534 * 1535 * Returns: 1536 * Zero on success, negative error code if failed. 1537 */ 1538 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) 1539 { 1540 struct list_head *pos, *n; 1541 struct intel_vgpu_oos_page *oos_page; 1542 int ret; 1543 1544 if (!enable_out_of_sync) 1545 return 0; 1546 1547 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { 1548 oos_page = container_of(pos, 1549 struct intel_vgpu_oos_page, vm_list); 1550 ret = ppgtt_set_guest_page_sync(oos_page->spt); 1551 if (ret) 1552 return ret; 1553 } 1554 return 0; 1555 } 1556 1557 /* 1558 * The heart of PPGTT shadow page table. 1559 */ 1560 static int ppgtt_handle_guest_write_page_table( 1561 struct intel_vgpu_ppgtt_spt *spt, 1562 struct intel_gvt_gtt_entry *we, unsigned long index) 1563 { 1564 struct intel_vgpu *vgpu = spt->vgpu; 1565 int type = spt->shadow_page.type; 1566 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1567 struct intel_gvt_gtt_entry old_se; 1568 int new_present; 1569 int i, ret; 1570 1571 new_present = ops->test_present(we); 1572 1573 /* 1574 * Adding the new entry first and then removing the old one, that can 1575 * guarantee the ppgtt table is validated during the window between 1576 * adding and removal. 1577 */ 1578 ppgtt_get_shadow_entry(spt, &old_se, index); 1579 1580 if (new_present) { 1581 ret = ppgtt_handle_guest_entry_add(spt, we, index); 1582 if (ret) 1583 goto fail; 1584 } 1585 1586 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index); 1587 if (ret) 1588 goto fail; 1589 1590 if (!new_present) { 1591 /* For 64KB splited entries, we need clear them all. */ 1592 if (ops->test_64k_splited(&old_se) && 1593 !(index % GTT_64K_PTE_STRIDE)) { 1594 gvt_vdbg_mm("remove splited 64K shadow entries\n"); 1595 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { 1596 ops->clear_64k_splited(&old_se); 1597 ops->set_pfn(&old_se, 1598 vgpu->gtt.scratch_pt[type].page_mfn); 1599 ppgtt_set_shadow_entry(spt, &old_se, index + i); 1600 } 1601 } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY || 1602 old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { 1603 ops->clear_pse(&old_se); 1604 ops->set_pfn(&old_se, 1605 vgpu->gtt.scratch_pt[type].page_mfn); 1606 ppgtt_set_shadow_entry(spt, &old_se, index); 1607 } else { 1608 ops->set_pfn(&old_se, 1609 vgpu->gtt.scratch_pt[type].page_mfn); 1610 ppgtt_set_shadow_entry(spt, &old_se, index); 1611 } 1612 } 1613 1614 return 0; 1615 fail: 1616 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", 1617 spt, we->val64, we->type); 1618 return ret; 1619 } 1620 1621 1622 1623 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt) 1624 { 1625 return enable_out_of_sync 1626 && gtt_type_is_pte_pt(spt->guest_page.type) 1627 && spt->guest_page.write_cnt >= 2; 1628 } 1629 1630 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, 1631 unsigned long index) 1632 { 1633 set_bit(index, spt->post_shadow_bitmap); 1634 if (!list_empty(&spt->post_shadow_list)) 1635 return; 1636 1637 list_add_tail(&spt->post_shadow_list, 1638 &spt->vgpu->gtt.post_shadow_list_head); 1639 } 1640 1641 /** 1642 * intel_vgpu_flush_post_shadow - flush the post shadow transactions 1643 * @vgpu: a vGPU 1644 * 1645 * This function is called before submitting a guest workload to host, 1646 * to flush all the post shadows for a vGPU. 1647 * 1648 * Returns: 1649 * Zero on success, negative error code if failed. 1650 */ 1651 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu) 1652 { 1653 struct list_head *pos, *n; 1654 struct intel_vgpu_ppgtt_spt *spt; 1655 struct intel_gvt_gtt_entry ge; 1656 unsigned long index; 1657 int ret; 1658 1659 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) { 1660 spt = container_of(pos, struct intel_vgpu_ppgtt_spt, 1661 post_shadow_list); 1662 1663 for_each_set_bit(index, spt->post_shadow_bitmap, 1664 GTT_ENTRY_NUM_IN_ONE_PAGE) { 1665 ppgtt_get_guest_entry(spt, &ge, index); 1666 1667 ret = ppgtt_handle_guest_write_page_table(spt, 1668 &ge, index); 1669 if (ret) 1670 return ret; 1671 clear_bit(index, spt->post_shadow_bitmap); 1672 } 1673 list_del_init(&spt->post_shadow_list); 1674 } 1675 return 0; 1676 } 1677 1678 static int ppgtt_handle_guest_write_page_table_bytes( 1679 struct intel_vgpu_ppgtt_spt *spt, 1680 u64 pa, void *p_data, int bytes) 1681 { 1682 struct intel_vgpu *vgpu = spt->vgpu; 1683 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1684 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1685 struct intel_gvt_gtt_entry we, se; 1686 unsigned long index; 1687 int ret; 1688 1689 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; 1690 1691 ppgtt_get_guest_entry(spt, &we, index); 1692 1693 /* 1694 * For page table which has 64K gtt entry, only PTE#0, PTE#16, 1695 * PTE#32, ... PTE#496 are used. Unused PTEs update should be 1696 * ignored. 1697 */ 1698 if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY && 1699 (index % GTT_64K_PTE_STRIDE)) { 1700 gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n", 1701 index); 1702 return 0; 1703 } 1704 1705 if (bytes == info->gtt_entry_size) { 1706 ret = ppgtt_handle_guest_write_page_table(spt, &we, index); 1707 if (ret) 1708 return ret; 1709 } else { 1710 if (!test_bit(index, spt->post_shadow_bitmap)) { 1711 int type = spt->shadow_page.type; 1712 1713 ppgtt_get_shadow_entry(spt, &se, index); 1714 ret = ppgtt_handle_guest_entry_removal(spt, &se, index); 1715 if (ret) 1716 return ret; 1717 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); 1718 ppgtt_set_shadow_entry(spt, &se, index); 1719 } 1720 ppgtt_set_post_shadow(spt, index); 1721 } 1722 1723 if (!enable_out_of_sync) 1724 return 0; 1725 1726 spt->guest_page.write_cnt++; 1727 1728 if (spt->guest_page.oos_page) 1729 ops->set_entry(spt->guest_page.oos_page->mem, &we, index, 1730 false, 0, vgpu); 1731 1732 if (can_do_out_of_sync(spt)) { 1733 if (!spt->guest_page.oos_page) 1734 ppgtt_allocate_oos_page(spt); 1735 1736 ret = ppgtt_set_guest_page_oos(spt); 1737 if (ret < 0) 1738 return ret; 1739 } 1740 return 0; 1741 } 1742 1743 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) 1744 { 1745 struct intel_vgpu *vgpu = mm->vgpu; 1746 struct intel_gvt *gvt = vgpu->gvt; 1747 struct intel_gvt_gtt *gtt = &gvt->gtt; 1748 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; 1749 struct intel_gvt_gtt_entry se; 1750 int index; 1751 1752 if (!mm->ppgtt_mm.shadowed) 1753 return; 1754 1755 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { 1756 ppgtt_get_shadow_root_entry(mm, &se, index); 1757 1758 if (!ops->test_present(&se)) 1759 continue; 1760 1761 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se); 1762 se.val64 = 0; 1763 ppgtt_set_shadow_root_entry(mm, &se, index); 1764 1765 trace_spt_guest_change(vgpu->id, "destroy root pointer", 1766 NULL, se.type, se.val64, index); 1767 } 1768 1769 mm->ppgtt_mm.shadowed = false; 1770 } 1771 1772 1773 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) 1774 { 1775 struct intel_vgpu *vgpu = mm->vgpu; 1776 struct intel_gvt *gvt = vgpu->gvt; 1777 struct intel_gvt_gtt *gtt = &gvt->gtt; 1778 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; 1779 struct intel_vgpu_ppgtt_spt *spt; 1780 struct intel_gvt_gtt_entry ge, se; 1781 int index, ret; 1782 1783 if (mm->ppgtt_mm.shadowed) 1784 return 0; 1785 1786 mm->ppgtt_mm.shadowed = true; 1787 1788 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { 1789 ppgtt_get_guest_root_entry(mm, &ge, index); 1790 1791 if (!ops->test_present(&ge)) 1792 continue; 1793 1794 trace_spt_guest_change(vgpu->id, __func__, NULL, 1795 ge.type, ge.val64, index); 1796 1797 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); 1798 if (IS_ERR(spt)) { 1799 gvt_vgpu_err("fail to populate guest root pointer\n"); 1800 ret = PTR_ERR(spt); 1801 goto fail; 1802 } 1803 ppgtt_generate_shadow_entry(&se, spt, &ge); 1804 ppgtt_set_shadow_root_entry(mm, &se, index); 1805 1806 trace_spt_guest_change(vgpu->id, "populate root pointer", 1807 NULL, se.type, se.val64, index); 1808 } 1809 1810 return 0; 1811 fail: 1812 invalidate_ppgtt_mm(mm); 1813 return ret; 1814 } 1815 1816 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu) 1817 { 1818 struct intel_vgpu_mm *mm; 1819 1820 mm = kzalloc(sizeof(*mm), GFP_KERNEL); 1821 if (!mm) 1822 return NULL; 1823 1824 mm->vgpu = vgpu; 1825 kref_init(&mm->ref); 1826 atomic_set(&mm->pincount, 0); 1827 1828 return mm; 1829 } 1830 1831 static void vgpu_free_mm(struct intel_vgpu_mm *mm) 1832 { 1833 kfree(mm); 1834 } 1835 1836 /** 1837 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU 1838 * @vgpu: a vGPU 1839 * @root_entry_type: ppgtt root entry type 1840 * @pdps: guest pdps. 1841 * 1842 * This function is used to create a ppgtt mm object for a vGPU. 1843 * 1844 * Returns: 1845 * Zero on success, negative error code in pointer if failed. 1846 */ 1847 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, 1848 intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) 1849 { 1850 struct intel_gvt *gvt = vgpu->gvt; 1851 struct intel_vgpu_mm *mm; 1852 int ret; 1853 1854 mm = vgpu_alloc_mm(vgpu); 1855 if (!mm) 1856 return ERR_PTR(-ENOMEM); 1857 1858 mm->type = INTEL_GVT_MM_PPGTT; 1859 1860 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && 1861 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY); 1862 mm->ppgtt_mm.root_entry_type = root_entry_type; 1863 1864 INIT_LIST_HEAD(&mm->ppgtt_mm.list); 1865 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list); 1866 1867 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) 1868 mm->ppgtt_mm.guest_pdps[0] = pdps[0]; 1869 else 1870 memcpy(mm->ppgtt_mm.guest_pdps, pdps, 1871 sizeof(mm->ppgtt_mm.guest_pdps)); 1872 1873 ret = shadow_ppgtt_mm(mm); 1874 if (ret) { 1875 gvt_vgpu_err("failed to shadow ppgtt mm\n"); 1876 vgpu_free_mm(mm); 1877 return ERR_PTR(ret); 1878 } 1879 1880 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); 1881 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); 1882 return mm; 1883 } 1884 1885 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) 1886 { 1887 struct intel_vgpu_mm *mm; 1888 unsigned long nr_entries; 1889 1890 mm = vgpu_alloc_mm(vgpu); 1891 if (!mm) 1892 return ERR_PTR(-ENOMEM); 1893 1894 mm->type = INTEL_GVT_MM_GGTT; 1895 1896 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; 1897 mm->ggtt_mm.virtual_ggtt = 1898 vzalloc(array_size(nr_entries, 1899 vgpu->gvt->device_info.gtt_entry_size)); 1900 if (!mm->ggtt_mm.virtual_ggtt) { 1901 vgpu_free_mm(mm); 1902 return ERR_PTR(-ENOMEM); 1903 } 1904 mm->ggtt_mm.last_partial_off = -1UL; 1905 1906 return mm; 1907 } 1908 1909 /** 1910 * _intel_vgpu_mm_release - destroy a mm object 1911 * @mm_ref: a kref object 1912 * 1913 * This function is used to destroy a mm object for vGPU 1914 * 1915 */ 1916 void _intel_vgpu_mm_release(struct kref *mm_ref) 1917 { 1918 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref); 1919 1920 if (GEM_WARN_ON(atomic_read(&mm->pincount))) 1921 gvt_err("vgpu mm pin count bug detected\n"); 1922 1923 if (mm->type == INTEL_GVT_MM_PPGTT) { 1924 list_del(&mm->ppgtt_mm.list); 1925 list_del(&mm->ppgtt_mm.lru_list); 1926 invalidate_ppgtt_mm(mm); 1927 } else { 1928 vfree(mm->ggtt_mm.virtual_ggtt); 1929 mm->ggtt_mm.last_partial_off = -1UL; 1930 } 1931 1932 vgpu_free_mm(mm); 1933 } 1934 1935 /** 1936 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object 1937 * @mm: a vGPU mm object 1938 * 1939 * This function is called when user doesn't want to use a vGPU mm object 1940 */ 1941 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) 1942 { 1943 atomic_dec(&mm->pincount); 1944 } 1945 1946 /** 1947 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object 1948 * @vgpu: a vGPU 1949 * 1950 * This function is called when user wants to use a vGPU mm object. If this 1951 * mm object hasn't been shadowed yet, the shadow will be populated at this 1952 * time. 1953 * 1954 * Returns: 1955 * Zero on success, negative error code if failed. 1956 */ 1957 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) 1958 { 1959 int ret; 1960 1961 atomic_inc(&mm->pincount); 1962 1963 if (mm->type == INTEL_GVT_MM_PPGTT) { 1964 ret = shadow_ppgtt_mm(mm); 1965 if (ret) 1966 return ret; 1967 1968 list_move_tail(&mm->ppgtt_mm.lru_list, 1969 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); 1970 1971 } 1972 1973 return 0; 1974 } 1975 1976 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) 1977 { 1978 struct intel_vgpu_mm *mm; 1979 struct list_head *pos, *n; 1980 1981 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { 1982 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); 1983 1984 if (atomic_read(&mm->pincount)) 1985 continue; 1986 1987 list_del_init(&mm->ppgtt_mm.lru_list); 1988 invalidate_ppgtt_mm(mm); 1989 return 1; 1990 } 1991 return 0; 1992 } 1993 1994 /* 1995 * GMA translation APIs. 1996 */ 1997 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, 1998 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) 1999 { 2000 struct intel_vgpu *vgpu = mm->vgpu; 2001 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 2002 struct intel_vgpu_ppgtt_spt *s; 2003 2004 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 2005 if (!s) 2006 return -ENXIO; 2007 2008 if (!guest) 2009 ppgtt_get_shadow_entry(s, e, index); 2010 else 2011 ppgtt_get_guest_entry(s, e, index); 2012 return 0; 2013 } 2014 2015 /** 2016 * intel_vgpu_gma_to_gpa - translate a gma to GPA 2017 * @mm: mm object. could be a PPGTT or GGTT mm object 2018 * @gma: graphics memory address in this mm object 2019 * 2020 * This function is used to translate a graphics memory address in specific 2021 * graphics memory space to guest physical address. 2022 * 2023 * Returns: 2024 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed. 2025 */ 2026 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) 2027 { 2028 struct intel_vgpu *vgpu = mm->vgpu; 2029 struct intel_gvt *gvt = vgpu->gvt; 2030 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; 2031 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; 2032 unsigned long gpa = INTEL_GVT_INVALID_ADDR; 2033 unsigned long gma_index[4]; 2034 struct intel_gvt_gtt_entry e; 2035 int i, levels = 0; 2036 int ret; 2037 2038 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT && 2039 mm->type != INTEL_GVT_MM_PPGTT); 2040 2041 if (mm->type == INTEL_GVT_MM_GGTT) { 2042 if (!vgpu_gmadr_is_valid(vgpu, gma)) 2043 goto err; 2044 2045 ggtt_get_guest_entry(mm, &e, 2046 gma_ops->gma_to_ggtt_pte_index(gma)); 2047 2048 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 2049 + (gma & ~I915_GTT_PAGE_MASK); 2050 2051 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); 2052 } else { 2053 switch (mm->ppgtt_mm.root_entry_type) { 2054 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 2055 ppgtt_get_shadow_root_entry(mm, &e, 0); 2056 2057 gma_index[0] = gma_ops->gma_to_pml4_index(gma); 2058 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); 2059 gma_index[2] = gma_ops->gma_to_pde_index(gma); 2060 gma_index[3] = gma_ops->gma_to_pte_index(gma); 2061 levels = 4; 2062 break; 2063 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 2064 ppgtt_get_shadow_root_entry(mm, &e, 2065 gma_ops->gma_to_l3_pdp_index(gma)); 2066 2067 gma_index[0] = gma_ops->gma_to_pde_index(gma); 2068 gma_index[1] = gma_ops->gma_to_pte_index(gma); 2069 levels = 2; 2070 break; 2071 default: 2072 GEM_BUG_ON(1); 2073 } 2074 2075 /* walk the shadow page table and get gpa from guest entry */ 2076 for (i = 0; i < levels; i++) { 2077 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], 2078 (i == levels - 1)); 2079 if (ret) 2080 goto err; 2081 2082 if (!pte_ops->test_present(&e)) { 2083 gvt_dbg_core("GMA 0x%lx is not present\n", gma); 2084 goto err; 2085 } 2086 } 2087 2088 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + 2089 (gma & ~I915_GTT_PAGE_MASK); 2090 trace_gma_translate(vgpu->id, "ppgtt", 0, 2091 mm->ppgtt_mm.root_entry_type, gma, gpa); 2092 } 2093 2094 return gpa; 2095 err: 2096 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); 2097 return INTEL_GVT_INVALID_ADDR; 2098 } 2099 2100 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, 2101 unsigned int off, void *p_data, unsigned int bytes) 2102 { 2103 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 2104 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 2105 unsigned long index = off >> info->gtt_entry_size_shift; 2106 struct intel_gvt_gtt_entry e; 2107 2108 if (bytes != 4 && bytes != 8) 2109 return -EINVAL; 2110 2111 ggtt_get_guest_entry(ggtt_mm, &e, index); 2112 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), 2113 bytes); 2114 return 0; 2115 } 2116 2117 /** 2118 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read 2119 * @vgpu: a vGPU 2120 * @off: register offset 2121 * @p_data: data will be returned to guest 2122 * @bytes: data length 2123 * 2124 * This function is used to emulate the GTT MMIO register read 2125 * 2126 * Returns: 2127 * Zero on success, error code if failed. 2128 */ 2129 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, 2130 void *p_data, unsigned int bytes) 2131 { 2132 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 2133 int ret; 2134 2135 if (bytes != 4 && bytes != 8) 2136 return -EINVAL; 2137 2138 off -= info->gtt_start_offset; 2139 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes); 2140 return ret; 2141 } 2142 2143 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, 2144 struct intel_gvt_gtt_entry *entry) 2145 { 2146 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2147 unsigned long pfn; 2148 2149 pfn = pte_ops->get_pfn(entry); 2150 if (pfn != vgpu->gvt->gtt.scratch_mfn) 2151 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, 2152 pfn << PAGE_SHIFT); 2153 } 2154 2155 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 2156 void *p_data, unsigned int bytes) 2157 { 2158 struct intel_gvt *gvt = vgpu->gvt; 2159 const struct intel_gvt_device_info *info = &gvt->device_info; 2160 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 2161 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 2162 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 2163 unsigned long gma, gfn; 2164 struct intel_gvt_gtt_entry e, m; 2165 dma_addr_t dma_addr; 2166 int ret; 2167 2168 if (bytes != 4 && bytes != 8) 2169 return -EINVAL; 2170 2171 gma = g_gtt_index << I915_GTT_PAGE_SHIFT; 2172 2173 /* the VM may configure the whole GM space when ballooning is used */ 2174 if (!vgpu_gmadr_is_valid(vgpu, gma)) 2175 return 0; 2176 2177 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 2178 2179 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 2180 bytes); 2181 2182 /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes 2183 * write, we assume the two 4 bytes writes are consecutive. 2184 * Otherwise, we abort and report error 2185 */ 2186 if (bytes < info->gtt_entry_size) { 2187 if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { 2188 /* the first partial part*/ 2189 ggtt_mm->ggtt_mm.last_partial_off = off; 2190 ggtt_mm->ggtt_mm.last_partial_data = e.val64; 2191 return 0; 2192 } else if ((g_gtt_index == 2193 (ggtt_mm->ggtt_mm.last_partial_off >> 2194 info->gtt_entry_size_shift)) && 2195 (off != ggtt_mm->ggtt_mm.last_partial_off)) { 2196 /* the second partial part */ 2197 2198 int last_off = ggtt_mm->ggtt_mm.last_partial_off & 2199 (info->gtt_entry_size - 1); 2200 2201 memcpy((void *)&e.val64 + last_off, 2202 (void *)&ggtt_mm->ggtt_mm.last_partial_data + 2203 last_off, bytes); 2204 2205 ggtt_mm->ggtt_mm.last_partial_off = -1UL; 2206 } else { 2207 int last_offset; 2208 2209 gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", 2210 ggtt_mm->ggtt_mm.last_partial_off, off, 2211 bytes, info->gtt_entry_size); 2212 2213 /* set host ggtt entry to scratch page and clear 2214 * virtual ggtt entry as not present for last 2215 * partially write offset 2216 */ 2217 last_offset = ggtt_mm->ggtt_mm.last_partial_off & 2218 (~(info->gtt_entry_size - 1)); 2219 2220 ggtt_get_host_entry(ggtt_mm, &m, last_offset); 2221 ggtt_invalidate_pte(vgpu, &m); 2222 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 2223 ops->clear_present(&m); 2224 ggtt_set_host_entry(ggtt_mm, &m, last_offset); 2225 ggtt_invalidate(gvt->dev_priv); 2226 2227 ggtt_get_guest_entry(ggtt_mm, &e, last_offset); 2228 ops->clear_present(&e); 2229 ggtt_set_guest_entry(ggtt_mm, &e, last_offset); 2230 2231 ggtt_mm->ggtt_mm.last_partial_off = off; 2232 ggtt_mm->ggtt_mm.last_partial_data = e.val64; 2233 2234 return 0; 2235 } 2236 } 2237 2238 if (ops->test_present(&e)) { 2239 gfn = ops->get_pfn(&e); 2240 m = e; 2241 2242 /* one PTE update may be issued in multiple writes and the 2243 * first write may not construct a valid gfn 2244 */ 2245 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { 2246 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 2247 goto out; 2248 } 2249 2250 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, 2251 PAGE_SIZE, &dma_addr); 2252 if (ret) { 2253 gvt_vgpu_err("fail to populate guest ggtt entry\n"); 2254 /* guest driver may read/write the entry when partial 2255 * update the entry in this situation p2m will fail 2256 * settting the shadow entry to point to a scratch page 2257 */ 2258 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 2259 } else 2260 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 2261 } else { 2262 ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); 2263 ggtt_invalidate_pte(vgpu, &m); 2264 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 2265 ops->clear_present(&m); 2266 } 2267 2268 out: 2269 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 2270 ggtt_invalidate(gvt->dev_priv); 2271 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 2272 return 0; 2273 } 2274 2275 /* 2276 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write 2277 * @vgpu: a vGPU 2278 * @off: register offset 2279 * @p_data: data from guest write 2280 * @bytes: data length 2281 * 2282 * This function is used to emulate the GTT MMIO register write 2283 * 2284 * Returns: 2285 * Zero on success, error code if failed. 2286 */ 2287 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, 2288 unsigned int off, void *p_data, unsigned int bytes) 2289 { 2290 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 2291 int ret; 2292 2293 if (bytes != 4 && bytes != 8) 2294 return -EINVAL; 2295 2296 off -= info->gtt_start_offset; 2297 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes); 2298 return ret; 2299 } 2300 2301 static int alloc_scratch_pages(struct intel_vgpu *vgpu, 2302 intel_gvt_gtt_type_t type) 2303 { 2304 struct intel_vgpu_gtt *gtt = &vgpu->gtt; 2305 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 2306 int page_entry_num = I915_GTT_PAGE_SIZE >> 2307 vgpu->gvt->device_info.gtt_entry_size_shift; 2308 void *scratch_pt; 2309 int i; 2310 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 2311 dma_addr_t daddr; 2312 2313 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 2314 return -EINVAL; 2315 2316 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 2317 if (!scratch_pt) { 2318 gvt_vgpu_err("fail to allocate scratch page\n"); 2319 return -ENOMEM; 2320 } 2321 2322 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 2323 4096, PCI_DMA_BIDIRECTIONAL); 2324 if (dma_mapping_error(dev, daddr)) { 2325 gvt_vgpu_err("fail to dmamap scratch_pt\n"); 2326 __free_page(virt_to_page(scratch_pt)); 2327 return -ENOMEM; 2328 } 2329 gtt->scratch_pt[type].page_mfn = 2330 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); 2331 gtt->scratch_pt[type].page = virt_to_page(scratch_pt); 2332 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 2333 vgpu->id, type, gtt->scratch_pt[type].page_mfn); 2334 2335 /* Build the tree by full filled the scratch pt with the entries which 2336 * point to the next level scratch pt or scratch page. The 2337 * scratch_pt[type] indicate the scratch pt/scratch page used by the 2338 * 'type' pt. 2339 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 2340 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self 2341 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 2342 */ 2343 if (type > GTT_TYPE_PPGTT_PTE_PT) { 2344 struct intel_gvt_gtt_entry se; 2345 2346 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); 2347 se.type = get_entry_type(type - 1); 2348 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); 2349 2350 /* The entry parameters like present/writeable/cache type 2351 * set to the same as i915's scratch page tree. 2352 */ 2353 se.val64 |= _PAGE_PRESENT | _PAGE_RW; 2354 if (type == GTT_TYPE_PPGTT_PDE_PT) 2355 se.val64 |= PPAT_CACHED; 2356 2357 for (i = 0; i < page_entry_num; i++) 2358 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); 2359 } 2360 2361 return 0; 2362 } 2363 2364 static int release_scratch_page_tree(struct intel_vgpu *vgpu) 2365 { 2366 int i; 2367 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 2368 dma_addr_t daddr; 2369 2370 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2371 if (vgpu->gtt.scratch_pt[i].page != NULL) { 2372 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << 2373 I915_GTT_PAGE_SHIFT); 2374 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 2375 __free_page(vgpu->gtt.scratch_pt[i].page); 2376 vgpu->gtt.scratch_pt[i].page = NULL; 2377 vgpu->gtt.scratch_pt[i].page_mfn = 0; 2378 } 2379 } 2380 2381 return 0; 2382 } 2383 2384 static int create_scratch_page_tree(struct intel_vgpu *vgpu) 2385 { 2386 int i, ret; 2387 2388 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2389 ret = alloc_scratch_pages(vgpu, i); 2390 if (ret) 2391 goto err; 2392 } 2393 2394 return 0; 2395 2396 err: 2397 release_scratch_page_tree(vgpu); 2398 return ret; 2399 } 2400 2401 /** 2402 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virulization 2403 * @vgpu: a vGPU 2404 * 2405 * This function is used to initialize per-vGPU graphics memory virtualization 2406 * components. 2407 * 2408 * Returns: 2409 * Zero on success, error code if failed. 2410 */ 2411 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) 2412 { 2413 struct intel_vgpu_gtt *gtt = &vgpu->gtt; 2414 2415 INIT_RADIX_TREE(>t->spt_tree, GFP_KERNEL); 2416 2417 INIT_LIST_HEAD(>t->ppgtt_mm_list_head); 2418 INIT_LIST_HEAD(>t->oos_page_list_head); 2419 INIT_LIST_HEAD(>t->post_shadow_list_head); 2420 2421 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu); 2422 if (IS_ERR(gtt->ggtt_mm)) { 2423 gvt_vgpu_err("fail to create mm for ggtt.\n"); 2424 return PTR_ERR(gtt->ggtt_mm); 2425 } 2426 2427 intel_vgpu_reset_ggtt(vgpu, false); 2428 2429 return create_scratch_page_tree(vgpu); 2430 } 2431 2432 static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) 2433 { 2434 struct list_head *pos, *n; 2435 struct intel_vgpu_mm *mm; 2436 2437 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2438 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2439 intel_vgpu_destroy_mm(mm); 2440 } 2441 2442 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head))) 2443 gvt_err("vgpu ppgtt mm is not fully destroyed\n"); 2444 2445 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) { 2446 gvt_err("Why we still has spt not freed?\n"); 2447 ppgtt_free_all_spt(vgpu); 2448 } 2449 } 2450 2451 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) 2452 { 2453 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); 2454 vgpu->gtt.ggtt_mm = NULL; 2455 } 2456 2457 /** 2458 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization 2459 * @vgpu: a vGPU 2460 * 2461 * This function is used to clean up per-vGPU graphics memory virtualization 2462 * components. 2463 * 2464 * Returns: 2465 * Zero on success, error code if failed. 2466 */ 2467 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) 2468 { 2469 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2470 intel_vgpu_destroy_ggtt_mm(vgpu); 2471 release_scratch_page_tree(vgpu); 2472 } 2473 2474 static void clean_spt_oos(struct intel_gvt *gvt) 2475 { 2476 struct intel_gvt_gtt *gtt = &gvt->gtt; 2477 struct list_head *pos, *n; 2478 struct intel_vgpu_oos_page *oos_page; 2479 2480 WARN(!list_empty(>t->oos_page_use_list_head), 2481 "someone is still using oos page\n"); 2482 2483 list_for_each_safe(pos, n, >t->oos_page_free_list_head) { 2484 oos_page = container_of(pos, struct intel_vgpu_oos_page, list); 2485 list_del(&oos_page->list); 2486 kfree(oos_page); 2487 } 2488 } 2489 2490 static int setup_spt_oos(struct intel_gvt *gvt) 2491 { 2492 struct intel_gvt_gtt *gtt = &gvt->gtt; 2493 struct intel_vgpu_oos_page *oos_page; 2494 int i; 2495 int ret; 2496 2497 INIT_LIST_HEAD(>t->oos_page_free_list_head); 2498 INIT_LIST_HEAD(>t->oos_page_use_list_head); 2499 2500 for (i = 0; i < preallocated_oos_pages; i++) { 2501 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); 2502 if (!oos_page) { 2503 ret = -ENOMEM; 2504 goto fail; 2505 } 2506 2507 INIT_LIST_HEAD(&oos_page->list); 2508 INIT_LIST_HEAD(&oos_page->vm_list); 2509 oos_page->id = i; 2510 list_add_tail(&oos_page->list, >t->oos_page_free_list_head); 2511 } 2512 2513 gvt_dbg_mm("%d oos pages preallocated\n", i); 2514 2515 return 0; 2516 fail: 2517 clean_spt_oos(gvt); 2518 return ret; 2519 } 2520 2521 /** 2522 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object 2523 * @vgpu: a vGPU 2524 * @page_table_level: PPGTT page table level 2525 * @root_entry: PPGTT page table root pointers 2526 * 2527 * This function is used to find a PPGTT mm object from mm object pool 2528 * 2529 * Returns: 2530 * pointer to mm object on success, NULL if failed. 2531 */ 2532 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, 2533 u64 pdps[]) 2534 { 2535 struct intel_vgpu_mm *mm; 2536 struct list_head *pos; 2537 2538 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) { 2539 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2540 2541 switch (mm->ppgtt_mm.root_entry_type) { 2542 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 2543 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0]) 2544 return mm; 2545 break; 2546 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 2547 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps, 2548 sizeof(mm->ppgtt_mm.guest_pdps))) 2549 return mm; 2550 break; 2551 default: 2552 GEM_BUG_ON(1); 2553 } 2554 } 2555 return NULL; 2556 } 2557 2558 /** 2559 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object. 2560 * @vgpu: a vGPU 2561 * @root_entry_type: ppgtt root entry type 2562 * @pdps: guest pdps 2563 * 2564 * This function is used to find or create a PPGTT mm object from a guest. 2565 * 2566 * Returns: 2567 * Zero on success, negative error code if failed. 2568 */ 2569 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu, 2570 intel_gvt_gtt_type_t root_entry_type, u64 pdps[]) 2571 { 2572 struct intel_vgpu_mm *mm; 2573 2574 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 2575 if (mm) { 2576 intel_vgpu_mm_get(mm); 2577 } else { 2578 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps); 2579 if (IS_ERR(mm)) 2580 gvt_vgpu_err("fail to create mm\n"); 2581 } 2582 return mm; 2583 } 2584 2585 /** 2586 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object. 2587 * @vgpu: a vGPU 2588 * @pdps: guest pdps 2589 * 2590 * This function is used to find a PPGTT mm object from a guest and destroy it. 2591 * 2592 * Returns: 2593 * Zero on success, negative error code if failed. 2594 */ 2595 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]) 2596 { 2597 struct intel_vgpu_mm *mm; 2598 2599 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 2600 if (!mm) { 2601 gvt_vgpu_err("fail to find ppgtt instance.\n"); 2602 return -EINVAL; 2603 } 2604 intel_vgpu_mm_put(mm); 2605 return 0; 2606 } 2607 2608 /** 2609 * intel_gvt_init_gtt - initialize mm components of a GVT device 2610 * @gvt: GVT device 2611 * 2612 * This function is called at the initialization stage, to initialize 2613 * the mm components of a GVT device. 2614 * 2615 * Returns: 2616 * zero on success, negative error code if failed. 2617 */ 2618 int intel_gvt_init_gtt(struct intel_gvt *gvt) 2619 { 2620 int ret; 2621 void *page; 2622 struct device *dev = &gvt->dev_priv->drm.pdev->dev; 2623 dma_addr_t daddr; 2624 2625 gvt_dbg_core("init gtt\n"); 2626 2627 gvt->gtt.pte_ops = &gen8_gtt_pte_ops; 2628 gvt->gtt.gma_ops = &gen8_gtt_gma_ops; 2629 2630 page = (void *)get_zeroed_page(GFP_KERNEL); 2631 if (!page) { 2632 gvt_err("fail to allocate scratch ggtt page\n"); 2633 return -ENOMEM; 2634 } 2635 2636 daddr = dma_map_page(dev, virt_to_page(page), 0, 2637 4096, PCI_DMA_BIDIRECTIONAL); 2638 if (dma_mapping_error(dev, daddr)) { 2639 gvt_err("fail to dmamap scratch ggtt page\n"); 2640 __free_page(virt_to_page(page)); 2641 return -ENOMEM; 2642 } 2643 2644 gvt->gtt.scratch_page = virt_to_page(page); 2645 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); 2646 2647 if (enable_out_of_sync) { 2648 ret = setup_spt_oos(gvt); 2649 if (ret) { 2650 gvt_err("fail to initialize SPT oos\n"); 2651 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 2652 __free_page(gvt->gtt.scratch_page); 2653 return ret; 2654 } 2655 } 2656 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); 2657 return 0; 2658 } 2659 2660 /** 2661 * intel_gvt_clean_gtt - clean up mm components of a GVT device 2662 * @gvt: GVT device 2663 * 2664 * This function is called at the driver unloading stage, to clean up the 2665 * the mm components of a GVT device. 2666 * 2667 */ 2668 void intel_gvt_clean_gtt(struct intel_gvt *gvt) 2669 { 2670 struct device *dev = &gvt->dev_priv->drm.pdev->dev; 2671 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << 2672 I915_GTT_PAGE_SHIFT); 2673 2674 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 2675 2676 __free_page(gvt->gtt.scratch_page); 2677 2678 if (enable_out_of_sync) 2679 clean_spt_oos(gvt); 2680 } 2681 2682 /** 2683 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances 2684 * @vgpu: a vGPU 2685 * 2686 * This function is called when invalidate all PPGTT instances of a vGPU. 2687 * 2688 */ 2689 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) 2690 { 2691 struct list_head *pos, *n; 2692 struct intel_vgpu_mm *mm; 2693 2694 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2695 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2696 if (mm->type == INTEL_GVT_MM_PPGTT) { 2697 list_del_init(&mm->ppgtt_mm.lru_list); 2698 if (mm->ppgtt_mm.shadowed) 2699 invalidate_ppgtt_mm(mm); 2700 } 2701 } 2702 } 2703 2704 /** 2705 * intel_vgpu_reset_ggtt - reset the GGTT entry 2706 * @vgpu: a vGPU 2707 * @invalidate_old: invalidate old entries 2708 * 2709 * This function is called at the vGPU create stage 2710 * to reset all the GGTT entries. 2711 * 2712 */ 2713 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) 2714 { 2715 struct intel_gvt *gvt = vgpu->gvt; 2716 struct drm_i915_private *dev_priv = gvt->dev_priv; 2717 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2718 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; 2719 struct intel_gvt_gtt_entry old_entry; 2720 u32 index; 2721 u32 num_entries; 2722 2723 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn); 2724 pte_ops->set_present(&entry); 2725 2726 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; 2727 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; 2728 while (num_entries--) { 2729 if (invalidate_old) { 2730 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2731 ggtt_invalidate_pte(vgpu, &old_entry); 2732 } 2733 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2734 } 2735 2736 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; 2737 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2738 while (num_entries--) { 2739 if (invalidate_old) { 2740 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2741 ggtt_invalidate_pte(vgpu, &old_entry); 2742 } 2743 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2744 } 2745 2746 ggtt_invalidate(dev_priv); 2747 } 2748 2749 /** 2750 * intel_vgpu_reset_gtt - reset the all GTT related status 2751 * @vgpu: a vGPU 2752 * 2753 * This function is called from vfio core to reset reset all 2754 * GTT related status, including GGTT, PPGTT, scratch page. 2755 * 2756 */ 2757 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) 2758 { 2759 /* Shadow pages are only created when there is no page 2760 * table tracking data, so remove page tracking data after 2761 * removing the shadow pages. 2762 */ 2763 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2764 intel_vgpu_reset_ggtt(vgpu, true); 2765 } 2766