// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gem/i915_gem_lmem.h"

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	if (flags & PTE_LM)
		pte |= GEN12_PPGTT_PTE_LM;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);

	return (vm->total + (1ull << shift) - 1) >> shift;
}

static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, &pd->pt, lvl);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = px_vaddr(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0]->encode,
				 count);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt, lvl);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			       struct i915_vm_pt_stash *stash,
			       struct i915_page_directory * const pd,
			       u64 * const start, const u64 end, int lvl)
{
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
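	/*
	 * Walk each PDE covered by [*start, end), installing page tables from
	 * the preallocated stash wherever an entry is still empty.
	 */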
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = stash->pt[!!lvl];
			__i915_gem_object_pin_pages(pt->base);
			i915_gem_object_make_unshrinkable(pt->base);

			if (lvl ||
			    gen8_pt_count(*start, end) < I915_PDES ||
			    intel_vgpu_active(vm->i915))
				fill_px(pt, vm->scratch[lvl]->encode);

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx])) {
				stash->pt[!!lvl] = pt->stash;
				atomic_set(&pt->used, 0);
				set_pd_entry(pd, idx, pt);
			} else {
				pt = pd->entry[idx];
			}
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			__gen8_ppgtt_alloc(vm, stash,
					   as_pd(pt), start, end, lvl);

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
}

static void gen8_ppgtt_alloc(struct i915_address_space *vm,
			     struct i915_vm_pt_stash *stash,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
			   &start, start + length, vm->top);
}

static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg || sg_dma_len(iter->sg) == 0) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + sg_dma_len(iter->sg);
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			clflush_cache_range(vaddr, PAGE_SIZE);
			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	clflush_cache_range(vaddr, PAGE_SIZE);

	return idx;
}

static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	unsigned int rem = sg_dma_len(iter->sg);
	u64 start = vma->node.start;
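
	/*
	 * Bind the scatterlist with the largest page size the DMA addresses
	 * and GTT offset allow: 2M PDEs where alignment permits, otherwise 4K
	 * PTEs, opportunistically marking fully populated page tables as 64K.
	 */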

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = px_vaddr(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = px_vaddr(pt);
		}

		do {
			GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = sg_dma_len(iter->sg);
				if (!rem)
					break;

				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		clflush_cache_range(vaddr, PAGE_SIZE);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = px_vaddr(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0]->encode;
				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg && sg_dma_len(iter->sg));
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
				    dma_addr_t addr,
				    u64 offset,
				    enum i915_cache_level level,
				    u32 flags)
{
	u64 idx = offset >> GEN8_PTE_SHIFT;
	struct i915_page_directory * const pdp =
		gen8_pdp_for_page_index(vm, idx);
	struct i915_page_directory *pd =
		i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	gen8_pte_t *vaddr;

	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
	clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	u32 pte_flags;
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		for (i = 0; i <= vm->top; i++)
			vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);

		return 0;
	}

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	pte_flags = vm->has_read_only;
	if (i915_gem_object_is_lmem(vm->scratch[0]))
		pte_flags |= PTE_LM;

	vm->scratch[0]->encode =
		gen8_pte_encode(px_dma(vm->scratch[0]),
				I915_CACHE_LLC, pte_flags);

	for (i = 1; i <= vm->top; i++) {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
		if (IS_ERR(obj))
			goto free_scratch;

		ret = map_pt_dma(vm, obj);
		if (ret) {
			i915_gem_object_put(obj);
			goto free_scratch;
		}

		fill_px(obj, vm->scratch[i - 1]->encode);
		obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);

		vm->scratch[i] = obj;
	}

	return 0;

free_scratch:
	while (i--)
		i915_gem_object_put(vm->scratch[i]);
	return -ENOMEM;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;
		int err;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		err = map_pt_dma(vm, pde->pt.base);
		if (err) {
			free_pd(vm, pde);
			return err;
		}

		fill_px(pde, vm->scratch[1]->encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;
	int err;

	GEM_BUG_ON(count > I915_PDES);

	pd = __alloc_pd(count);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	err = map_pt_dma(vm, pd->pt.base);
	if (err)
		goto err_pd;

	fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;

err_pd:
	free_pd(vm, pd);
	return ERR_PTR(err);
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b
 * address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
	ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);

	if (HAS_LMEM(gt->i915))
		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
	else
		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	ppgtt->vm.pte_encode = gen8_pte_encode;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}