// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active())
		return false;

	if (IS_GEN(i915, 5) && IS_MOBILE(i915))
		return true;

	if (IS_GEN(i915, 12))
		return true; /* XXX DMAR fault reason 7 */

	return false;
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;
	int open;

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
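	/*
	 * Parking vm.open at 0 makes the unbind paths below skip their
	 * per-VMA PTE writes; the whole range is scrubbed in one go by
	 * clear_range() afterwards, and the saved value is restored once
	 * we are done.
	 */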
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);
	atomic_set(&ggtt->vm.open, open);

	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (INTEL_GEN(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
				enum i915_cache_level level,
				u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
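	 * (gen8_ggtt_pte_encode() above does not encode a read-only bit at
	 * all, so the flag is simply dropped on this path.)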
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * 0 remains reserved always.
		 *
		 * If we fail to reserve 0, and then fail to find any space
		 * for an error-capture, remain silent. We can afford not
		 * to reserve an error_capture node as we have fallback
		 * paths, and we trust that 0 will remain reserved. However,
		 * the only likely reason for failure to insert is a driver
		 * bug, which we expect to cause other failures...
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}

static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	atomic_set(&ggtt->vm.open, 0);

	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);
	dma_resv_fini(&ggtt->vm.resv);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}

int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
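	/*
	 * The __BKL variants below wrap the PTE writes in stop_machine() and
	 * drain the GAM fifo (bxt_vtd_ggtt_wa) before returning, so no other
	 * CPU can race an aperture access against the update.
	 */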
	if (intel_ggtt_update_needs_vtd_wa(i915) ||
	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm.resv);

	if (INTEL_GEN(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret) {
		dma_resv_fini(&ggtt->vm.resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
	if (ret)
		return ret;

	if (intel_vtd_active())
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (INTEL_GEN(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		sg = remap_pages(obj, rem_info->plane[i].offset,
				 rem_info->plane[i].width, rem_info->plane[i].height,
				 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
				 st, sg);
	}

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			i915_sg_trim(st); /* Drop any unused tail entries. */

			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		vma->pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	ret = 0;
	if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%d)!\n",
			vma->ggtt_view.type, ret);
	}
	return ret;
}