/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}
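
/*
 * Example of the translation above (illustrative numbers only, not taken
 * from real hardware): if a vGPU's ballooned aperture covers guest range
 * [0x10000000, 0x18000000) and the matching host aperture range starts at
 * 0x20000000, then intel_gvt_ggtt_gmadr_g2h() maps
 *
 *	g_addr 0x10001000 -> h_addr 0x20001000
 *
 * i.e. the guest offset within its own (aperture or hidden) range is kept
 * and only the range base is rebased to the host view;
 * intel_gvt_ggtt_gmadr_h2g() is the exact inverse.
 */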

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when the PSE bit is set
 *
 * If the given type does not carry that piece of information, e.g. an L4
 * root entry has no PSE bit and a PTE page table has no next-level page
 * table, GTT_TYPE_INVALID is returned. This is useful when traversing a
 * page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
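
/*
 * Illustrative walk through gtt_type_table for a 4-level PPGTT, using the
 * helpers above (no new behaviour, just an example):
 *
 *	GTT_TYPE_PPGTT_ROOT_L4_ENTRY
 *	  -- get_next_pt_type() -->  GTT_TYPE_PPGTT_PML4_PT
 *	  -- get_entry_type()   -->  GTT_TYPE_PPGTT_PML4_ENTRY
 *	  -- get_next_pt_type() -->  GTT_TYPE_PPGTT_PDP_PT
 *	  ... and so on, until get_next_pt_type() returns GTT_TYPE_INVALID
 *	  at the PTE level, or get_pse_type() is used when an entry has the
 *	  PSE bit set.
 */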

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return e;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		WARN_ON(ret);
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return e;
}

static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return e;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		WARN_ON(ret);
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return e;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}

	e->val64 |= (pfn << 12);
}
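
/*
 * Example of the mask arithmetic above, assuming GTT_HAW == 46:
 * ADDR_4K_MASK keeps address bits [46:12], so for a 4K PTE with
 * val64 == 0x0000000123456077ULL,
 *
 *	pfn = (val64 & ADDR_4K_MASK) >> 12 = 0x123456
 *
 * while the low attribute bits (present, RW, cache type, ...) and any bits
 * above the hardware address width are discarded. The 2M/1G masks work the
 * same way but start at bit 21/30 respectively.
 */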

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & (1 << 7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit
	 * set; that still works, so root pointer entries need to be
	 * treated specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & (1 << 0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~(1 << 0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}

/*
 * MM helpers.
 */
struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	e->type = mm->page_table_entry_type;

	ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
	ops->test_pse(e);
	return e;
}

struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return e;

	ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << GTT_PAGE_SHIFT,
			spt->vgpu);
	ops->test_pse(e);
	return e;
}

static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return e;

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
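
/*
 * Note on the helpers above: the "guest" variants pass a NULL page table,
 * so ppgtt_spt_get_entry()/ppgtt_spt_set_entry() go through the hypervisor
 * GPA accessors against the write-protected guest page
 * (spt->guest_page.gfn), while the "shadow" variants access the host-side
 * shadow page directly through its kernel mapping (spt->shadow_page.vaddr).
 * A typical shadowing step therefore reads a guest entry, translates its
 * gfn to an mfn, and writes the result back into the shadow page, e.g.
 * (illustrative only):
 *
 *	ppgtt_get_guest_entry(spt, &ge, i);
 *	gtt_entry_p2m(vgpu, &ge, &se);
 *	ppgtt_set_shadow_entry(spt, &se, i);
 */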

/**
 * intel_vgpu_init_guest_page - init a guest page data structure
 * @vgpu: a vGPU
 * @p: a guest page data structure
 * @gfn: guest memory page frame number
 * @handler: the function called when the tracked guest memory page has
 * been modified.
 *
 * This function is called when a user wants to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&p->node);

	p->writeprotection = false;
	p->gfn = gfn;
	p->handler = handler;
	p->data = data;
	p->oos_page = NULL;
	p->write_cnt = 0;

	hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

/**
 * intel_vgpu_clean_guest_page - release the resources owned by a guest page
 * data structure
 * @vgpu: a vGPU
 * @p: a tracked guest page
 *
 * This function is called when a user stops tracking a guest memory page.
 */
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);

	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	if (p->writeprotection)
		intel_gvt_hypervisor_unset_wp_page(vgpu, p);
}

/**
 * intel_vgpu_find_guest_page - find a guest page data structure by GFN
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation logic wants to know if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to guest page data structure, NULL if failed.
 */
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_guest_page *p;

	hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
			p, node, gfn) {
		if (p->gfn == gfn)
			return p;
	}
	return NULL;
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> GTT_PAGE_SHIFT;
	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
			p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *gp, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!gpt->writeprotection)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(gp,
			pa, p_data, bytes);
}

static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: the guest page type may differ from the shadow page type
	 * once PSE pages are supported.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
	if (ret) {
		gvt_vgpu_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_vgpu_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
	return NULL;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
			ppgtt_get_guest_entry(spt, e, i)))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
			ppgtt_get_shadow_entry(spt, e, i)))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
		return -EINVAL;

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}
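
/*
 * Note: shadow page tables are torn down recursively.
 * ppgtt_invalidate_shadow_page_by_shadow_entry() above resolves a shadow
 * entry to the shadow page table it points at (skipping scratch pages),
 * while ppgtt_invalidate_shadow_page() below drops a reference and, once
 * the refcount reaches zero, walks the present entries so lower-level
 * tables are invalidated the same way, down to the PTE level.
 */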

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
			return -EINVAL;
		}
		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
				spt->vgpu, &e);
		if (ret)
			goto fail;
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
	if (g) {
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
			s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			ret = gtt_entry_p2m(vgpu, &ge, &se);
			if (ret)
				goto fail;
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry e;
	int ret;

	ppgtt_get_shadow_entry(spt, &e, index);

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
			index);

	if (!ops->test_present(&e))
		return 0;

	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(e.type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
	ppgtt_set_shadow_entry(spt, &e, index);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
			we->val64, index);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
			spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
		index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
		oos_page->mem, GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
}
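
/*
 * Out-of-sync (OOS) page life cycle, as implemented above:
 *
 *   attach_oos_page()           - snapshot the guest PTE page into
 *                                 oos_page->mem
 *   ppgtt_set_guest_page_oos()  - drop write protection, so further guest
 *                                 writes to the page are no longer trapped
 *   ppgtt_set_guest_page_sync() - re-arm write protection and call
 *                                 sync_oos_page() to diff the snapshot
 *                                 against the guest page, updating the
 *                                 shadow entries that changed
 *   detach_oos_page()           - return the oos_page to the free list
 *
 * This trades per-write emulation cost for one batched sync before a guest
 * workload is submitted (see intel_vgpu_sync_oos_pages() below).
 */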

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;

	int ret;
	int new_present;

	new_present = ops->test_present(we);

	ret = ppgtt_handle_guest_entry_removal(gpt, index);
	if (ret)
		goto fail;

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(
					&spt->guest_page, &ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			ret = ppgtt_handle_guest_entry_removal(gpt, index);
			if (ret)
				return ret;
		}

		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	gpt->write_cnt++;

	if (gpt->oos_page)
		ops->set_entry(gpt->oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(gpt)) {
		if (!gpt->oos_page)
			ppgtt_allocate_oos_page(vgpu, gpt);

		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
		if (ret < 0)
			return ret;
	}
	return 0;
}
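
/*
 * Summary of the write-tracking path above: a trapped write that covers a
 * full GTT entry is shadowed immediately; a partial write only marks the
 * entry in post_shadow_bitmap so it is re-shadowed later by
 * intel_vgpu_flush_post_shadow(), once the guest has completed the entry.
 * Frequently written PTE pages may additionally be moved out of sync (see
 * the OOS helpers above) so that further writes are not trapped at all.
 */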

/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mem;

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		mm->page_table_entry_cnt = 4;
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = kzalloc(mm->has_shadow_page_table ?
			mm->page_table_entry_size * 2
				: mm->page_table_entry_size, GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
		if (!mm->has_shadow_page_table)
			return 0;
		mm->shadow_page_table = mem + mm->page_table_entry_size;
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		mm->page_table_entry_cnt =
			(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = vzalloc(mm->page_table_entry_size);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
	}
	return 0;
}

static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
	if (mm->type == INTEL_GVT_MM_PPGTT) {
		kfree(mm->virtual_page_table);
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		if (mm->virtual_page_table)
			vfree(mm->virtual_page_table);
	}
	mm->virtual_page_table = mm->shadow_page_table = NULL;
}

static void invalidate_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int i;

	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
		return;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_shadow_root_entry(mm, &se, i);
		if (!ops->test_present(&se))
			continue;
		ppgtt_invalidate_shadow_page_by_shadow_entry(
				vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				NULL, se.type, se.val64, i);
	}
	mm->shadowed = false;
}

/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;

	if (!mm->initialized)
		goto out;

	list_del(&mm->list);
	list_del(&mm->lru_list);

	if (mm->has_shadow_page_table)
		invalidate_mm(mm);

	gtt->mm_free_page_table(mm);
out:
	kfree(mm);
}

static int shadow_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int i;
	int ret;

	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
		return 0;

	mm->shadowed = true;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_guest_root_entry(mm, &ge, i);
		if (!ops->test_present(&ge))
			continue;

		trace_gpt_change(vgpu->id, __func__, NULL,
				ge.type, ge.val64, i);

		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "populate root pointer",
				NULL, se.type, se.val64, i);
	}
	return 0;
fail:
	invalidate_mm(mm);
	return ret;
}

/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if user wants
 *	to populate shadow later.
 * @page_table_level: describe the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO.
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * Pointer to the created mm object on success, an ERR_PTR-encoded negative
 * error code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
		int mm_type, void *virtual_page_table, int page_table_level,
		u32 pde_base_index)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		ret = -ENOMEM;
		goto fail;
	}

	mm->type = mm_type;

	if (page_table_level == 1)
		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
	else if (page_table_level == 3)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
	else if (page_table_level == 4)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	else {
		WARN_ON(1);
		ret = -EINVAL;
		goto fail;
	}

	mm->page_table_level = page_table_level;
	mm->pde_base_index = pde_base_index;

	mm->vgpu = vgpu;
	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);

	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);
	INIT_LIST_HEAD(&mm->list);
	INIT_LIST_HEAD(&mm->lru_list);
	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

	ret = gtt->mm_alloc_page_table(mm);
	if (ret) {
		gvt_vgpu_err("fail to allocate page table for mm\n");
		goto fail;
	}

	mm->initialized = true;

	if (virtual_page_table)
		memcpy(mm->virtual_page_table, virtual_page_table,
				mm->page_table_entry_size);

	if (mm->has_shadow_page_table) {
		ret = shadow_mm(mm);
		if (ret)
			goto fail;
		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
	}
	return mm;
fail:
	gvt_vgpu_err("fail to create mm\n");
	if (mm)
		intel_gvt_mm_unreference(mm);
	return ERR_PTR(ret);
}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer wants to use a vGPU mm
 * object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return 0;

	atomic_inc(&mm->pincount);

	if (!mm->shadowed) {
		ret = shadow_mm(mm);
		if (ret)
			return ret;
	}

	list_del_init(&mm->lru_list);
	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
	return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, lru_list);

		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;
		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->lru_list);
		invalidate_mm(mm);
		return 1;
	}
	return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	if (WARN_ON(!mm->has_shadow_page_table))
		return -EINVAL;

	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}
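
/*
 * Layout of a gen8 48-bit graphics memory address as consumed by
 * intel_vgpu_gma_to_gpa() below (matching the gen8_gma_to_*_index()
 * helpers defined earlier in this file):
 *
 *	bits 47..39  PML4 index   (& 0x1ff)
 *	bits 38..30  PDP index    (& 0x1ff, or & 0x3 for a 3-level table)
 *	bits 29..21  PDE index    (& 0x1ff)
 *	bits 20..12  PTE index    (& 0x1ff)
 *	bits 11..0   page offset
 *
 * e.g. gma 0x80403ff000 yields pml4 = 1, pdp = 1, pde = 1, pte = 0x1ff.
 */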

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, index;
	int ret;

	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
		return INTEL_GVT_INVALID_ADDR;

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));
		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
			+ (gma & ~GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
		return gpa;
	}

	switch (mm->page_table_level) {
	case 4:
		ppgtt_get_shadow_root_entry(mm, &e, 0);
		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
		gma_index[2] = gma_ops->gma_to_pde_index(gma);
		gma_index[3] = gma_ops->gma_to_pte_index(gma);
		index = 4;
		break;
	case 3:
		ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_l3_pdp_index(gma));
		gma_index[0] = gma_ops->gma_to_pde_index(gma);
		gma_index[1] = gma_ops->gma_to_pte_index(gma);
		index = 2;
		break;
	case 2:
		ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_pde_index(gma));
		gma_index[0] = gma_ops->gma_to_pte_index(gma);
		index = 1;
		break;
	default:
		WARN_ON(1);
		goto err;
	}

	/* walk into the shadow page table and get gpa from guest entry */
	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
			(i == index - 1));
		if (ret)
			goto err;
	}

	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
		+ (gma & ~GTT_PAGE_MASK);

	trace_gma_translate(vgpu->id, "ppgtt", 0,
			mm->page_table_level, gma, gpa);
	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}

static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e, m;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		ret = gtt_entry_p2m(vgpu, &e, &m);
		if (ret) {
			gvt_vgpu_err("fail to translate guest gtt entry\n");
			/*
			 * The guest may read/write the entry while only part
			 * of it has been updated; p2m can fail in that case,
			 * so point the shadow entry at a scratch page instead.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
		}
	} else {
		m = e;
		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
	}

	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
	gtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}
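
/*
 * Illustration of why the scratch fallback above matters: a guest may
 * update one 64-bit GGTT PTE with two 4-byte MMIO writes. After the first
 * write the entry can carry a stale or half-written address, so the
 * gfn-to-mfn translation may fail; mapping the shadow entry to the scratch
 * page keeps the GGTT consistent until the second write completes the
 * entry.
 */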

/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/*
	 * Build the tree by filling the scratch pt with entries that point
	 * to the next-level scratch pt or the scratch page.
	 * scratch_pt[type] indicates the scratch pt/scratch page used by
	 * page tables of type 'type'.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
	 * GTT_TYPE_PPGTT_PDE_PT level page tables; this scratch_pt itself
	 * acts as a GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch
	 * page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/*
		 * The entry parameters (present/writeable/cache type) are
		 * set to the same values as i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED_INDEX;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
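
/*
 * Resulting scratch hierarchy for a 4-level vGPU, built bottom-up by
 * create_scratch_page_tree() below (one page per level):
 *
 *   scratch_pt[PML4_PT] --every entry--> scratch_pt[PDP_PT]
 *   scratch_pt[PDP_PT]  --every entry--> scratch_pt[PDE_PT]
 *   scratch_pt[PDE_PT]  --every entry--> scratch_pt[PTE_PT]
 *   scratch_pt[PTE_PT]  = plain zeroed scratch data page
 *
 * so any guest address that ends up in a scratch entry resolves to the
 * harmless scratch data page rather than faulting.
 */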
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_vgpu_mm *ggtt_mm;

	hash_init(gtt->guest_page_hash_table);
	hash_init(gtt->shadow_page_hash_table);

	INIT_LIST_HEAD(&gtt->mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	intel_vgpu_reset_ggtt(vgpu);

	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
			NULL, 1, 0);
	if (IS_ERR(ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(ggtt_mm);
	}

	gtt->ggtt_mm = ggtt_mm;

	return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type == type) {
			vgpu->gvt->gtt.mm_free_page_table(mm);
			list_del(&mm->list);
			list_del(&mm->lru_list);
			kfree(mm);
		}
	}
}
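/* Minimal usage sketch (assumed caller context, not taken from this file):
 * intel_vgpu_init_gtt() above is expected to run once on the vGPU creation
 * path and be paired with intel_vgpu_clean_gtt() below on destruction:
 *
 *	ret = intel_vgpu_init_gtt(vgpu);
 *	if (ret)
 *		goto err;
 *	...
 *	intel_vgpu_clean_gtt(vgpu);
 */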
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	ppgtt_free_all_shadow_page(vgpu);
	release_scratch_page_tree(vgpu);

	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry)
{
	struct list_head *pos;
	struct intel_vgpu_mm *mm;
	u64 *src, *dst;

	list_for_each(pos, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;

		if (mm->page_table_level != page_table_level)
			continue;

		src = root_entry;
		dst = mm->virtual_page_table;

		if (page_table_level == 3) {
			if (src[0] == dst[0]
					&& src[1] == dst[1]
					&& src[2] == dst[2]
					&& src[3] == dst[3])
				return mm;
		} else {
			if (src[0] == dst[0])
				return mm;
		}
	}
	return NULL;
}
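/* Illustrative note, not driver code: a 3-level PPGTT carries four PDP root
 * entries, so all of src[0..3] are compared above, while a 4-level PPGTT has
 * a single PML4 root and only src[0] matters. The g2v handlers below take the
 * guest root pointers from the PV info registers and then do roughly:
 *
 *	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
 *	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
 */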
/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_vgpu_err("fail to create mm\n");
			return PTR_ERR(mm);
		}
	}
	return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_gvt_mm_unreference(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
			|| IS_KABYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}
	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_ggtt_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
	return 0;
}
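/* Cross-reference note (illustrative): the scratch GGTT page mapped in
 * intel_gvt_init_gtt() above is the common fallback target in this file,
 * e.g.
 *
 *	ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 *
 * in emulate_gtt_mmio_write() when p2m translation fails, and for every
 * entry written by intel_vgpu_reset_ggtt() below.
 */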
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the
 * mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
					GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_ggtt_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	u32 index;
	u32 offset;
	u32 num_entries;
	struct intel_gvt_gtt_entry e;

	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
	e.type = GTT_TYPE_GGTT_PTE;
	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
	e.val64 |= _PAGE_PRESENT;

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	gtt_invalidate(dev_priv);
}

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch pages.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
{
	int i;

	ppgtt_free_all_shadow_page(vgpu);

	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);

	if (!dmlr)
		return;

	intel_vgpu_reset_ggtt(vgpu);

	/* clear scratch page for security */
	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL)
			memset(page_address(vgpu->gtt.scratch_pt[i].page),
				0, PAGE_SIZE);
	}
}
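/* Illustrative summary derived from the code above: a GT reset
 * (intel_vgpu_reset_gtt(vgpu, false)) only drops the PPGTT shadow pages and
 * PPGTT mm objects, while a device model level reset
 * (intel_vgpu_reset_gtt(vgpu, true)) additionally rewrites every GGTT entry
 * to point at the scratch page and zeroes the per-vGPU scratch page tables.
 */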