/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, a
 * globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate
 * on, or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of the VMA.
 *
 * The core API is designed to have copy semantics, which means that the
 * passed-in struct i915_ggtt_view does not need to be persistent (left
 * around after calling the core API functions).
 *
 */

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int err;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		err = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start, vma->size);
		if (err)
			return err;
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
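/*
 * Worked example (illustrative only, not used by the driver): with the
 * helpers above, a read-only, LLC-cached PTE for a page at DMA address
 * 0x1000 would be built as
 *
 *	gen8_pte_encode(0x1000, I915_CACHE_LLC, PTE_READ_ONLY)
 *		== 0x1000 | _PAGE_PRESENT | PPAT_CACHED
 *
 * i.e. the DMA address is OR'd with the present bit and the PPAT cache
 * attribute, while _PAGE_RW is cleared because of PTE_READ_ONLY.
 */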
static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* Look in our global stash of WC pages... */
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
381 * 382 * We have to be careful as page allocation may trigger the shrinker 383 * (via direct reclaim) which will fill up the WC stash underneath us. 384 * So we add our WB pages into a temporary pvec on the stack and merge 385 * them into the WC stash after all the allocations are complete. 386 */ 387 pagevec_init(&stack); 388 do { 389 struct page *page; 390 391 page = alloc_page(gfp); 392 if (unlikely(!page)) 393 break; 394 395 stack.pages[stack.nr++] = page; 396 } while (pagevec_space(&stack)); 397 398 if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { 399 page = stack.pages[--stack.nr]; 400 401 /* Merge spare WC pages to the global stash */ 402 stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); 403 404 /* Push any surplus WC pages onto the local VM stash */ 405 if (stack.nr) 406 stash_push_pagevec(&vm->free_pages, &stack); 407 } 408 409 /* Return unwanted leftovers */ 410 if (unlikely(stack.nr)) { 411 WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); 412 __pagevec_release(&stack); 413 } 414 415 return page; 416 } 417 418 static void vm_free_pages_release(struct i915_address_space *vm, 419 bool immediate) 420 { 421 struct pagevec *pvec = &vm->free_pages.pvec; 422 struct pagevec stack; 423 424 lockdep_assert_held(&vm->free_pages.lock); 425 GEM_BUG_ON(!pagevec_count(pvec)); 426 427 if (vm->pt_kmap_wc) { 428 /* 429 * When we use WC, first fill up the global stash and then 430 * only if full immediately free the overflow. 431 */ 432 stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); 433 434 /* 435 * As we have made some room in the VM's free_pages, 436 * we can wait for it to fill again. Unless we are 437 * inside i915_address_space_fini() and must 438 * immediately release the pages! 439 */ 440 if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) 441 return; 442 443 /* 444 * We have to drop the lock to allow ourselves to sleep, 445 * so take a copy of the pvec and clear the stash for 446 * others to use it as we sleep. 447 */ 448 stack = *pvec; 449 pagevec_reinit(pvec); 450 spin_unlock(&vm->free_pages.lock); 451 452 pvec = &stack; 453 set_pages_array_wb(pvec->pages, pvec->nr); 454 455 spin_lock(&vm->free_pages.lock); 456 } 457 458 __pagevec_release(pvec); 459 } 460 461 static void vm_free_page(struct i915_address_space *vm, struct page *page) 462 { 463 /* 464 * On !llc, we need to change the pages back to WB. We only do so 465 * in bulk, so we rarely need to change the page attributes here, 466 * but doing so requires a stop_machine() from deep inside arch/x86/mm. 467 * To make detection of the possible sleep more likely, use an 468 * unconditional might_sleep() for everybody. 469 */ 470 might_sleep(); 471 spin_lock(&vm->free_pages.lock); 472 if (!pagevec_add(&vm->free_pages.pvec, page)) 473 vm_free_pages_release(vm, false); 474 spin_unlock(&vm->free_pages.lock); 475 } 476 477 static void i915_address_space_init(struct i915_address_space *vm, 478 struct drm_i915_private *dev_priv) 479 { 480 /* 481 * The vm->mutex must be reclaim safe (for use in the shrinker). 482 * Do a dummy acquire now under fs_reclaim so that any allocation 483 * attempt holding the lock is immediately reported by lockdep. 
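	 *
	 * (The dummy acquire is a lockdep-only annotation: it does not change
	 * how vm->mutex is used at runtime, it merely records the reclaim
	 * dependency up front so violations are caught early in testing.)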
484 */ 485 mutex_init(&vm->mutex); 486 i915_gem_shrinker_taints_mutex(&vm->mutex); 487 488 GEM_BUG_ON(!vm->total); 489 drm_mm_init(&vm->mm, 0, vm->total); 490 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; 491 492 stash_init(&vm->free_pages); 493 494 INIT_LIST_HEAD(&vm->active_list); 495 INIT_LIST_HEAD(&vm->inactive_list); 496 INIT_LIST_HEAD(&vm->unbound_list); 497 } 498 499 static void i915_address_space_fini(struct i915_address_space *vm) 500 { 501 spin_lock(&vm->free_pages.lock); 502 if (pagevec_count(&vm->free_pages.pvec)) 503 vm_free_pages_release(vm, true); 504 GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); 505 spin_unlock(&vm->free_pages.lock); 506 507 drm_mm_takedown(&vm->mm); 508 509 mutex_destroy(&vm->mutex); 510 } 511 512 static int __setup_page_dma(struct i915_address_space *vm, 513 struct i915_page_dma *p, 514 gfp_t gfp) 515 { 516 p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); 517 if (unlikely(!p->page)) 518 return -ENOMEM; 519 520 p->daddr = dma_map_page_attrs(vm->dma, 521 p->page, 0, PAGE_SIZE, 522 PCI_DMA_BIDIRECTIONAL, 523 DMA_ATTR_SKIP_CPU_SYNC | 524 DMA_ATTR_NO_WARN); 525 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { 526 vm_free_page(vm, p->page); 527 return -ENOMEM; 528 } 529 530 return 0; 531 } 532 533 static int setup_page_dma(struct i915_address_space *vm, 534 struct i915_page_dma *p) 535 { 536 return __setup_page_dma(vm, p, __GFP_HIGHMEM); 537 } 538 539 static void cleanup_page_dma(struct i915_address_space *vm, 540 struct i915_page_dma *p) 541 { 542 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 543 vm_free_page(vm, p->page); 544 } 545 546 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) 547 548 #define setup_px(vm, px) setup_page_dma((vm), px_base(px)) 549 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px)) 550 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v)) 551 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v)) 552 553 static void fill_page_dma(struct i915_address_space *vm, 554 struct i915_page_dma *p, 555 const u64 val) 556 { 557 u64 * const vaddr = kmap_atomic(p->page); 558 559 memset64(vaddr, val, PAGE_SIZE / sizeof(val)); 560 561 kunmap_atomic(vaddr); 562 } 563 564 static void fill_page_dma_32(struct i915_address_space *vm, 565 struct i915_page_dma *p, 566 const u32 v) 567 { 568 fill_page_dma(vm, p, (u64)v << 32 | v); 569 } 570 571 static int 572 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) 573 { 574 unsigned long size; 575 576 /* 577 * In order to utilize 64K pages for an object with a size < 2M, we will 578 * need to support a 64K scratch page, given that every 16th entry for a 579 * page-table operating in 64K mode must point to a properly aligned 64K 580 * region, including any PTEs which happen to point to scratch. 581 * 582 * This is only relevant for the 48b PPGTT where we support 583 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the 584 * scratch (read-only) between all vm, we create one 64k scratch page 585 * for all. 
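	 *
	 * If a suitably aligned 64K page cannot be allocated (or DMA mapped),
	 * the loop below simply falls back to a 4K scratch page rather than
	 * failing outright.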
586 */ 587 size = I915_GTT_PAGE_SIZE_4K; 588 if (i915_vm_is_48bit(vm) && 589 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { 590 size = I915_GTT_PAGE_SIZE_64K; 591 gfp |= __GFP_NOWARN; 592 } 593 gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; 594 595 do { 596 int order = get_order(size); 597 struct page *page; 598 dma_addr_t addr; 599 600 page = alloc_pages(gfp, order); 601 if (unlikely(!page)) 602 goto skip; 603 604 addr = dma_map_page_attrs(vm->dma, 605 page, 0, size, 606 PCI_DMA_BIDIRECTIONAL, 607 DMA_ATTR_SKIP_CPU_SYNC | 608 DMA_ATTR_NO_WARN); 609 if (unlikely(dma_mapping_error(vm->dma, addr))) 610 goto free_page; 611 612 if (unlikely(!IS_ALIGNED(addr, size))) 613 goto unmap_page; 614 615 vm->scratch_page.page = page; 616 vm->scratch_page.daddr = addr; 617 vm->scratch_page.order = order; 618 return 0; 619 620 unmap_page: 621 dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); 622 free_page: 623 __free_pages(page, order); 624 skip: 625 if (size == I915_GTT_PAGE_SIZE_4K) 626 return -ENOMEM; 627 628 size = I915_GTT_PAGE_SIZE_4K; 629 gfp &= ~__GFP_NOWARN; 630 } while (1); 631 } 632 633 static void cleanup_scratch_page(struct i915_address_space *vm) 634 { 635 struct i915_page_dma *p = &vm->scratch_page; 636 637 dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT, 638 PCI_DMA_BIDIRECTIONAL); 639 __free_pages(p->page, p->order); 640 } 641 642 static struct i915_page_table *alloc_pt(struct i915_address_space *vm) 643 { 644 struct i915_page_table *pt; 645 646 pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); 647 if (unlikely(!pt)) 648 return ERR_PTR(-ENOMEM); 649 650 if (unlikely(setup_px(vm, pt))) { 651 kfree(pt); 652 return ERR_PTR(-ENOMEM); 653 } 654 655 pt->used_ptes = 0; 656 return pt; 657 } 658 659 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) 660 { 661 cleanup_px(vm, pt); 662 kfree(pt); 663 } 664 665 static void gen8_initialize_pt(struct i915_address_space *vm, 666 struct i915_page_table *pt) 667 { 668 fill_px(vm, pt, vm->scratch_pte); 669 } 670 671 static void gen6_initialize_pt(struct i915_address_space *vm, 672 struct i915_page_table *pt) 673 { 674 fill32_px(vm, pt, vm->scratch_pte); 675 } 676 677 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) 678 { 679 struct i915_page_directory *pd; 680 681 pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); 682 if (unlikely(!pd)) 683 return ERR_PTR(-ENOMEM); 684 685 if (unlikely(setup_px(vm, pd))) { 686 kfree(pd); 687 return ERR_PTR(-ENOMEM); 688 } 689 690 pd->used_pdes = 0; 691 return pd; 692 } 693 694 static void free_pd(struct i915_address_space *vm, 695 struct i915_page_directory *pd) 696 { 697 cleanup_px(vm, pd); 698 kfree(pd); 699 } 700 701 static void gen8_initialize_pd(struct i915_address_space *vm, 702 struct i915_page_directory *pd) 703 { 704 fill_px(vm, pd, 705 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC)); 706 memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES); 707 } 708 709 static int __pdp_init(struct i915_address_space *vm, 710 struct i915_page_directory_pointer *pdp) 711 { 712 const unsigned int pdpes = i915_pdpes_per_pdp(vm); 713 714 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory), 715 I915_GFP_ALLOW_FAIL); 716 if (unlikely(!pdp->page_directory)) 717 return -ENOMEM; 718 719 memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes); 720 721 return 0; 722 } 723 724 static void __pdp_fini(struct i915_page_directory_pointer *pdp) 725 { 726 kfree(pdp->page_directory); 727 pdp->page_directory = NULL; 728 } 729 730 static 
inline bool use_4lvl(const struct i915_address_space *vm) 731 { 732 return i915_vm_is_48bit(vm); 733 } 734 735 static struct i915_page_directory_pointer * 736 alloc_pdp(struct i915_address_space *vm) 737 { 738 struct i915_page_directory_pointer *pdp; 739 int ret = -ENOMEM; 740 741 GEM_BUG_ON(!use_4lvl(vm)); 742 743 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); 744 if (!pdp) 745 return ERR_PTR(-ENOMEM); 746 747 ret = __pdp_init(vm, pdp); 748 if (ret) 749 goto fail_bitmap; 750 751 ret = setup_px(vm, pdp); 752 if (ret) 753 goto fail_page_m; 754 755 return pdp; 756 757 fail_page_m: 758 __pdp_fini(pdp); 759 fail_bitmap: 760 kfree(pdp); 761 762 return ERR_PTR(ret); 763 } 764 765 static void free_pdp(struct i915_address_space *vm, 766 struct i915_page_directory_pointer *pdp) 767 { 768 __pdp_fini(pdp); 769 770 if (!use_4lvl(vm)) 771 return; 772 773 cleanup_px(vm, pdp); 774 kfree(pdp); 775 } 776 777 static void gen8_initialize_pdp(struct i915_address_space *vm, 778 struct i915_page_directory_pointer *pdp) 779 { 780 gen8_ppgtt_pdpe_t scratch_pdpe; 781 782 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); 783 784 fill_px(vm, pdp, scratch_pdpe); 785 } 786 787 static void gen8_initialize_pml4(struct i915_address_space *vm, 788 struct i915_pml4 *pml4) 789 { 790 fill_px(vm, pml4, 791 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC)); 792 memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); 793 } 794 795 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify 796 * the page table structures, we mark them dirty so that 797 * context switching/execlist queuing code takes extra steps 798 * to ensure that tlbs are flushed. 799 */ 800 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) 801 { 802 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask; 803 } 804 805 /* Removes entries from a single page table, releasing it if it's empty. 806 * Caller can use the return value to update higher-level entries. 
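 * (gen8_ppgtt_clear_pd() below does exactly that: when this returns true
 * it points the PDE back at the scratch page table and frees the PT.)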
807 */ 808 static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, 809 struct i915_page_table *pt, 810 u64 start, u64 length) 811 { 812 unsigned int num_entries = gen8_pte_count(start, length); 813 unsigned int pte = gen8_pte_index(start); 814 unsigned int pte_end = pte + num_entries; 815 gen8_pte_t *vaddr; 816 817 GEM_BUG_ON(num_entries > pt->used_ptes); 818 819 pt->used_ptes -= num_entries; 820 if (!pt->used_ptes) 821 return true; 822 823 vaddr = kmap_atomic_px(pt); 824 while (pte < pte_end) 825 vaddr[pte++] = vm->scratch_pte; 826 kunmap_atomic(vaddr); 827 828 return false; 829 } 830 831 static void gen8_ppgtt_set_pde(struct i915_address_space *vm, 832 struct i915_page_directory *pd, 833 struct i915_page_table *pt, 834 unsigned int pde) 835 { 836 gen8_pde_t *vaddr; 837 838 pd->page_table[pde] = pt; 839 840 vaddr = kmap_atomic_px(pd); 841 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC); 842 kunmap_atomic(vaddr); 843 } 844 845 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, 846 struct i915_page_directory *pd, 847 u64 start, u64 length) 848 { 849 struct i915_page_table *pt; 850 u32 pde; 851 852 gen8_for_each_pde(pt, pd, start, length, pde) { 853 GEM_BUG_ON(pt == vm->scratch_pt); 854 855 if (!gen8_ppgtt_clear_pt(vm, pt, start, length)) 856 continue; 857 858 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); 859 GEM_BUG_ON(!pd->used_pdes); 860 pd->used_pdes--; 861 862 free_pt(vm, pt); 863 } 864 865 return !pd->used_pdes; 866 } 867 868 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, 869 struct i915_page_directory_pointer *pdp, 870 struct i915_page_directory *pd, 871 unsigned int pdpe) 872 { 873 gen8_ppgtt_pdpe_t *vaddr; 874 875 pdp->page_directory[pdpe] = pd; 876 if (!use_4lvl(vm)) 877 return; 878 879 vaddr = kmap_atomic_px(pdp); 880 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); 881 kunmap_atomic(vaddr); 882 } 883 884 /* Removes entries from a single page dir pointer, releasing it if it's empty. 885 * Caller can use the return value to update higher-level entries 886 */ 887 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, 888 struct i915_page_directory_pointer *pdp, 889 u64 start, u64 length) 890 { 891 struct i915_page_directory *pd; 892 unsigned int pdpe; 893 894 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 895 GEM_BUG_ON(pd == vm->scratch_pd); 896 897 if (!gen8_ppgtt_clear_pd(vm, pd, start, length)) 898 continue; 899 900 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); 901 GEM_BUG_ON(!pdp->used_pdpes); 902 pdp->used_pdpes--; 903 904 free_pd(vm, pd); 905 } 906 907 return !pdp->used_pdpes; 908 } 909 910 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, 911 u64 start, u64 length) 912 { 913 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length); 914 } 915 916 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4, 917 struct i915_page_directory_pointer *pdp, 918 unsigned int pml4e) 919 { 920 gen8_ppgtt_pml4e_t *vaddr; 921 922 pml4->pdps[pml4e] = pdp; 923 924 vaddr = kmap_atomic_px(pml4); 925 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); 926 kunmap_atomic(vaddr); 927 } 928 929 /* Removes entries from a single pml4. 930 * This is the top-level structure in 4-level page tables used on gen8+. 931 * Empty entries are always scratch pml4e. 
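 * Note that the pml4 itself is never freed here; emptied PDPs are
 * released and their pml4 entries reset to point at the scratch pdp.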
932 */ 933 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, 934 u64 start, u64 length) 935 { 936 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 937 struct i915_pml4 *pml4 = &ppgtt->pml4; 938 struct i915_page_directory_pointer *pdp; 939 unsigned int pml4e; 940 941 GEM_BUG_ON(!use_4lvl(vm)); 942 943 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { 944 GEM_BUG_ON(pdp == vm->scratch_pdp); 945 946 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length)) 947 continue; 948 949 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); 950 951 free_pdp(vm, pdp); 952 } 953 } 954 955 static inline struct sgt_dma { 956 struct scatterlist *sg; 957 dma_addr_t dma, max; 958 } sgt_dma(struct i915_vma *vma) { 959 struct scatterlist *sg = vma->pages->sgl; 960 dma_addr_t addr = sg_dma_address(sg); 961 return (struct sgt_dma) { sg, addr, addr + sg->length }; 962 } 963 964 struct gen8_insert_pte { 965 u16 pml4e; 966 u16 pdpe; 967 u16 pde; 968 u16 pte; 969 }; 970 971 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) 972 { 973 return (struct gen8_insert_pte) { 974 gen8_pml4e_index(start), 975 gen8_pdpe_index(start), 976 gen8_pde_index(start), 977 gen8_pte_index(start), 978 }; 979 } 980 981 static __always_inline bool 982 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, 983 struct i915_page_directory_pointer *pdp, 984 struct sgt_dma *iter, 985 struct gen8_insert_pte *idx, 986 enum i915_cache_level cache_level, 987 u32 flags) 988 { 989 struct i915_page_directory *pd; 990 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); 991 gen8_pte_t *vaddr; 992 bool ret; 993 994 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); 995 pd = pdp->page_directory[idx->pdpe]; 996 vaddr = kmap_atomic_px(pd->page_table[idx->pde]); 997 do { 998 vaddr[idx->pte] = pte_encode | iter->dma; 999 1000 iter->dma += I915_GTT_PAGE_SIZE; 1001 if (iter->dma >= iter->max) { 1002 iter->sg = __sg_next(iter->sg); 1003 if (!iter->sg) { 1004 ret = false; 1005 break; 1006 } 1007 1008 iter->dma = sg_dma_address(iter->sg); 1009 iter->max = iter->dma + iter->sg->length; 1010 } 1011 1012 if (++idx->pte == GEN8_PTES) { 1013 idx->pte = 0; 1014 1015 if (++idx->pde == I915_PDES) { 1016 idx->pde = 0; 1017 1018 /* Limited by sg length for 3lvl */ 1019 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { 1020 idx->pdpe = 0; 1021 ret = true; 1022 break; 1023 } 1024 1025 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); 1026 pd = pdp->page_directory[idx->pdpe]; 1027 } 1028 1029 kunmap_atomic(vaddr); 1030 vaddr = kmap_atomic_px(pd->page_table[idx->pde]); 1031 } 1032 } while (1); 1033 kunmap_atomic(vaddr); 1034 1035 return ret; 1036 } 1037 1038 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, 1039 struct i915_vma *vma, 1040 enum i915_cache_level cache_level, 1041 u32 flags) 1042 { 1043 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1044 struct sgt_dma iter = sgt_dma(vma); 1045 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); 1046 1047 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, 1048 cache_level, flags); 1049 1050 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 1051 } 1052 1053 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, 1054 struct i915_page_directory_pointer **pdps, 1055 struct sgt_dma *iter, 1056 enum i915_cache_level cache_level, 1057 u32 flags) 1058 { 1059 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); 1060 u64 start = vma->node.start; 1061 dma_addr_t rem = iter->sg->length; 1062 1063 do { 1064 struct 
gen8_insert_pte idx = gen8_insert_pte(start); 1065 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e]; 1066 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe]; 1067 unsigned int page_size; 1068 bool maybe_64K = false; 1069 gen8_pte_t encode = pte_encode; 1070 gen8_pte_t *vaddr; 1071 u16 index, max; 1072 1073 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && 1074 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && 1075 rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) { 1076 index = idx.pde; 1077 max = I915_PDES; 1078 page_size = I915_GTT_PAGE_SIZE_2M; 1079 1080 encode |= GEN8_PDE_PS_2M; 1081 1082 vaddr = kmap_atomic_px(pd); 1083 } else { 1084 struct i915_page_table *pt = pd->page_table[idx.pde]; 1085 1086 index = idx.pte; 1087 max = GEN8_PTES; 1088 page_size = I915_GTT_PAGE_SIZE; 1089 1090 if (!index && 1091 vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && 1092 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && 1093 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || 1094 rem >= (max - index) * I915_GTT_PAGE_SIZE)) 1095 maybe_64K = true; 1096 1097 vaddr = kmap_atomic_px(pt); 1098 } 1099 1100 do { 1101 GEM_BUG_ON(iter->sg->length < page_size); 1102 vaddr[index++] = encode | iter->dma; 1103 1104 start += page_size; 1105 iter->dma += page_size; 1106 rem -= page_size; 1107 if (iter->dma >= iter->max) { 1108 iter->sg = __sg_next(iter->sg); 1109 if (!iter->sg) 1110 break; 1111 1112 rem = iter->sg->length; 1113 iter->dma = sg_dma_address(iter->sg); 1114 iter->max = iter->dma + rem; 1115 1116 if (maybe_64K && index < max && 1117 !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && 1118 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || 1119 rem >= (max - index) * I915_GTT_PAGE_SIZE))) 1120 maybe_64K = false; 1121 1122 if (unlikely(!IS_ALIGNED(iter->dma, page_size))) 1123 break; 1124 } 1125 } while (rem >= page_size && index < max); 1126 1127 kunmap_atomic(vaddr); 1128 1129 /* 1130 * Is it safe to mark the 2M block as 64K? -- Either we have 1131 * filled whole page-table with 64K entries, or filled part of 1132 * it and have reached the end of the sg table and we have 1133 * enough padding. 1134 */ 1135 if (maybe_64K && 1136 (index == max || 1137 (i915_vm_has_scratch_64K(vma->vm) && 1138 !iter->sg && IS_ALIGNED(vma->node.start + 1139 vma->node.size, 1140 I915_GTT_PAGE_SIZE_2M)))) { 1141 vaddr = kmap_atomic_px(pd); 1142 vaddr[idx.pde] |= GEN8_PDE_IPS_64K; 1143 kunmap_atomic(vaddr); 1144 page_size = I915_GTT_PAGE_SIZE_64K; 1145 1146 /* 1147 * We write all 4K page entries, even when using 64K 1148 * pages. In order to verify that the HW isn't cheating 1149 * by using the 4K PTE instead of the 64K PTE, we want 1150 * to remove all the surplus entries. If the HW skipped 1151 * the 64K PTE, it will read/write into the scratch page 1152 * instead - which we detect as missing results during 1153 * selftests. 
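			 * (The scrub below keeps only the first PTE of each
			 * 64K chunk and resets the remaining 15 to scratch,
			 * hence the memset64() loop striding by 16.)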
1154 */ 1155 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { 1156 u16 i; 1157 1158 encode = vma->vm->scratch_pte; 1159 vaddr = kmap_atomic_px(pd->page_table[idx.pde]); 1160 1161 for (i = 1; i < index; i += 16) 1162 memset64(vaddr + i, encode, 15); 1163 1164 kunmap_atomic(vaddr); 1165 } 1166 } 1167 1168 vma->page_sizes.gtt |= page_size; 1169 } while (iter->sg); 1170 } 1171 1172 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, 1173 struct i915_vma *vma, 1174 enum i915_cache_level cache_level, 1175 u32 flags) 1176 { 1177 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1178 struct sgt_dma iter = sgt_dma(vma); 1179 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; 1180 1181 if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 1182 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level, 1183 flags); 1184 } else { 1185 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); 1186 1187 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], 1188 &iter, &idx, cache_level, 1189 flags)) 1190 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); 1191 1192 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 1193 } 1194 } 1195 1196 static void gen8_free_page_tables(struct i915_address_space *vm, 1197 struct i915_page_directory *pd) 1198 { 1199 int i; 1200 1201 for (i = 0; i < I915_PDES; i++) { 1202 if (pd->page_table[i] != vm->scratch_pt) 1203 free_pt(vm, pd->page_table[i]); 1204 } 1205 } 1206 1207 static int gen8_init_scratch(struct i915_address_space *vm) 1208 { 1209 int ret; 1210 1211 /* 1212 * If everybody agrees to not to write into the scratch page, 1213 * we can reuse it for all vm, keeping contexts and processes separate. 1214 */ 1215 if (vm->has_read_only && 1216 vm->i915->kernel_context && 1217 vm->i915->kernel_context->ppgtt) { 1218 struct i915_address_space *clone = 1219 &vm->i915->kernel_context->ppgtt->vm; 1220 1221 GEM_BUG_ON(!clone->has_read_only); 1222 1223 vm->scratch_page.order = clone->scratch_page.order; 1224 vm->scratch_pte = clone->scratch_pte; 1225 vm->scratch_pt = clone->scratch_pt; 1226 vm->scratch_pd = clone->scratch_pd; 1227 vm->scratch_pdp = clone->scratch_pdp; 1228 return 0; 1229 } 1230 1231 ret = setup_scratch_page(vm, __GFP_HIGHMEM); 1232 if (ret) 1233 return ret; 1234 1235 vm->scratch_pte = 1236 gen8_pte_encode(vm->scratch_page.daddr, 1237 I915_CACHE_LLC, 1238 PTE_READ_ONLY); 1239 1240 vm->scratch_pt = alloc_pt(vm); 1241 if (IS_ERR(vm->scratch_pt)) { 1242 ret = PTR_ERR(vm->scratch_pt); 1243 goto free_scratch_page; 1244 } 1245 1246 vm->scratch_pd = alloc_pd(vm); 1247 if (IS_ERR(vm->scratch_pd)) { 1248 ret = PTR_ERR(vm->scratch_pd); 1249 goto free_pt; 1250 } 1251 1252 if (use_4lvl(vm)) { 1253 vm->scratch_pdp = alloc_pdp(vm); 1254 if (IS_ERR(vm->scratch_pdp)) { 1255 ret = PTR_ERR(vm->scratch_pdp); 1256 goto free_pd; 1257 } 1258 } 1259 1260 gen8_initialize_pt(vm, vm->scratch_pt); 1261 gen8_initialize_pd(vm, vm->scratch_pd); 1262 if (use_4lvl(vm)) 1263 gen8_initialize_pdp(vm, vm->scratch_pdp); 1264 1265 return 0; 1266 1267 free_pd: 1268 free_pd(vm, vm->scratch_pd); 1269 free_pt: 1270 free_pt(vm, vm->scratch_pt); 1271 free_scratch_page: 1272 cleanup_scratch_page(vm); 1273 1274 return ret; 1275 } 1276 1277 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) 1278 { 1279 struct i915_address_space *vm = &ppgtt->vm; 1280 struct drm_i915_private *dev_priv = vm->i915; 1281 enum vgt_g2v_type msg; 1282 int i; 1283 1284 if (use_4lvl(vm)) { 1285 const u64 daddr = px_dma(&ppgtt->pml4); 1286 1287 I915_WRITE(vgtif_reg(pdp[0].lo), 
lower_32_bits(daddr)); 1288 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); 1289 1290 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : 1291 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); 1292 } else { 1293 for (i = 0; i < GEN8_3LVL_PDPES; i++) { 1294 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); 1295 1296 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); 1297 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); 1298 } 1299 1300 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : 1301 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); 1302 } 1303 1304 I915_WRITE(vgtif_reg(g2v_notify), msg); 1305 1306 return 0; 1307 } 1308 1309 static void gen8_free_scratch(struct i915_address_space *vm) 1310 { 1311 if (!vm->scratch_page.daddr) 1312 return; 1313 1314 if (use_4lvl(vm)) 1315 free_pdp(vm, vm->scratch_pdp); 1316 free_pd(vm, vm->scratch_pd); 1317 free_pt(vm, vm->scratch_pt); 1318 cleanup_scratch_page(vm); 1319 } 1320 1321 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, 1322 struct i915_page_directory_pointer *pdp) 1323 { 1324 const unsigned int pdpes = i915_pdpes_per_pdp(vm); 1325 int i; 1326 1327 for (i = 0; i < pdpes; i++) { 1328 if (pdp->page_directory[i] == vm->scratch_pd) 1329 continue; 1330 1331 gen8_free_page_tables(vm, pdp->page_directory[i]); 1332 free_pd(vm, pdp->page_directory[i]); 1333 } 1334 1335 free_pdp(vm, pdp); 1336 } 1337 1338 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) 1339 { 1340 int i; 1341 1342 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { 1343 if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp) 1344 continue; 1345 1346 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]); 1347 } 1348 1349 cleanup_px(&ppgtt->vm, &ppgtt->pml4); 1350 } 1351 1352 static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 1353 { 1354 struct drm_i915_private *dev_priv = vm->i915; 1355 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1356 1357 if (intel_vgpu_active(dev_priv)) 1358 gen8_ppgtt_notify_vgt(ppgtt, false); 1359 1360 if (use_4lvl(vm)) 1361 gen8_ppgtt_cleanup_4lvl(ppgtt); 1362 else 1363 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp); 1364 1365 gen8_free_scratch(vm); 1366 } 1367 1368 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, 1369 struct i915_page_directory *pd, 1370 u64 start, u64 length) 1371 { 1372 struct i915_page_table *pt; 1373 u64 from = start; 1374 unsigned int pde; 1375 1376 gen8_for_each_pde(pt, pd, start, length, pde) { 1377 int count = gen8_pte_count(start, length); 1378 1379 if (pt == vm->scratch_pt) { 1380 pd->used_pdes++; 1381 1382 pt = alloc_pt(vm); 1383 if (IS_ERR(pt)) { 1384 pd->used_pdes--; 1385 goto unwind; 1386 } 1387 1388 if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) 1389 gen8_initialize_pt(vm, pt); 1390 1391 gen8_ppgtt_set_pde(vm, pd, pt, pde); 1392 GEM_BUG_ON(pd->used_pdes > I915_PDES); 1393 } 1394 1395 pt->used_ptes += count; 1396 } 1397 return 0; 1398 1399 unwind: 1400 gen8_ppgtt_clear_pd(vm, pd, from, start - from); 1401 return -ENOMEM; 1402 } 1403 1404 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, 1405 struct i915_page_directory_pointer *pdp, 1406 u64 start, u64 length) 1407 { 1408 struct i915_page_directory *pd; 1409 u64 from = start; 1410 unsigned int pdpe; 1411 int ret; 1412 1413 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1414 if (pd == vm->scratch_pd) { 1415 pdp->used_pdpes++; 1416 1417 pd = alloc_pd(vm); 1418 if (IS_ERR(pd)) { 1419 pdp->used_pdpes--; 1420 goto unwind; 1421 } 1422 1423 gen8_initialize_pd(vm, pd); 1424 gen8_ppgtt_set_pdpe(vm, pdp, pd, 
pdpe); 1425 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm)); 1426 1427 mark_tlbs_dirty(i915_vm_to_ppgtt(vm)); 1428 } 1429 1430 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); 1431 if (unlikely(ret)) 1432 goto unwind_pd; 1433 } 1434 1435 return 0; 1436 1437 unwind_pd: 1438 if (!pd->used_pdes) { 1439 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); 1440 GEM_BUG_ON(!pdp->used_pdpes); 1441 pdp->used_pdpes--; 1442 free_pd(vm, pd); 1443 } 1444 unwind: 1445 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); 1446 return -ENOMEM; 1447 } 1448 1449 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, 1450 u64 start, u64 length) 1451 { 1452 return gen8_ppgtt_alloc_pdp(vm, 1453 &i915_vm_to_ppgtt(vm)->pdp, start, length); 1454 } 1455 1456 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, 1457 u64 start, u64 length) 1458 { 1459 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1460 struct i915_pml4 *pml4 = &ppgtt->pml4; 1461 struct i915_page_directory_pointer *pdp; 1462 u64 from = start; 1463 u32 pml4e; 1464 int ret; 1465 1466 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { 1467 if (pml4->pdps[pml4e] == vm->scratch_pdp) { 1468 pdp = alloc_pdp(vm); 1469 if (IS_ERR(pdp)) 1470 goto unwind; 1471 1472 gen8_initialize_pdp(vm, pdp); 1473 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); 1474 } 1475 1476 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); 1477 if (unlikely(ret)) 1478 goto unwind_pdp; 1479 } 1480 1481 return 0; 1482 1483 unwind_pdp: 1484 if (!pdp->used_pdpes) { 1485 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); 1486 free_pdp(vm, pdp); 1487 } 1488 unwind: 1489 gen8_ppgtt_clear_4lvl(vm, from, start - from); 1490 return -ENOMEM; 1491 } 1492 1493 static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, 1494 struct i915_page_directory_pointer *pdp, 1495 u64 start, u64 length, 1496 gen8_pte_t scratch_pte, 1497 struct seq_file *m) 1498 { 1499 struct i915_address_space *vm = &ppgtt->vm; 1500 struct i915_page_directory *pd; 1501 u32 pdpe; 1502 1503 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1504 struct i915_page_table *pt; 1505 u64 pd_len = length; 1506 u64 pd_start = start; 1507 u32 pde; 1508 1509 if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd) 1510 continue; 1511 1512 seq_printf(m, "\tPDPE #%d\n", pdpe); 1513 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { 1514 u32 pte; 1515 gen8_pte_t *pt_vaddr; 1516 1517 if (pd->page_table[pde] == ppgtt->vm.scratch_pt) 1518 continue; 1519 1520 pt_vaddr = kmap_atomic_px(pt); 1521 for (pte = 0; pte < GEN8_PTES; pte += 4) { 1522 u64 va = (pdpe << GEN8_PDPE_SHIFT | 1523 pde << GEN8_PDE_SHIFT | 1524 pte << GEN8_PTE_SHIFT); 1525 int i; 1526 bool found = false; 1527 1528 for (i = 0; i < 4; i++) 1529 if (pt_vaddr[pte + i] != scratch_pte) 1530 found = true; 1531 if (!found) 1532 continue; 1533 1534 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte); 1535 for (i = 0; i < 4; i++) { 1536 if (pt_vaddr[pte + i] != scratch_pte) 1537 seq_printf(m, " %llx", pt_vaddr[pte + i]); 1538 else 1539 seq_puts(m, " SCRATCH "); 1540 } 1541 seq_puts(m, "\n"); 1542 } 1543 kunmap_atomic(pt_vaddr); 1544 } 1545 } 1546 } 1547 1548 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) 1549 { 1550 struct i915_address_space *vm = &ppgtt->vm; 1551 const gen8_pte_t scratch_pte = vm->scratch_pte; 1552 u64 start = 0, length = ppgtt->vm.total; 1553 1554 if (use_4lvl(vm)) { 1555 u64 pml4e; 1556 struct i915_pml4 *pml4 = &ppgtt->pml4; 1557 struct i915_page_directory_pointer *pdp; 1558 1559 gen8_for_each_pml4e(pdp, 
pml4, start, length, pml4e) { 1560 if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp) 1561 continue; 1562 1563 seq_printf(m, " PML4E #%llu\n", pml4e); 1564 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m); 1565 } 1566 } else { 1567 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m); 1568 } 1569 } 1570 1571 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) 1572 { 1573 struct i915_address_space *vm = &ppgtt->vm; 1574 struct i915_page_directory_pointer *pdp = &ppgtt->pdp; 1575 struct i915_page_directory *pd; 1576 u64 start = 0, length = ppgtt->vm.total; 1577 u64 from = start; 1578 unsigned int pdpe; 1579 1580 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1581 pd = alloc_pd(vm); 1582 if (IS_ERR(pd)) 1583 goto unwind; 1584 1585 gen8_initialize_pd(vm, pd); 1586 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); 1587 pdp->used_pdpes++; 1588 } 1589 1590 pdp->used_pdpes++; /* never remove */ 1591 return 0; 1592 1593 unwind: 1594 start -= from; 1595 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { 1596 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); 1597 free_pd(vm, pd); 1598 } 1599 pdp->used_pdpes = 0; 1600 return -ENOMEM; 1601 } 1602 1603 /* 1604 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers 1605 * with a net effect resembling a 2-level page table in normal x86 terms. Each 1606 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address 1607 * space. 1608 * 1609 */ 1610 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) 1611 { 1612 struct i915_hw_ppgtt *ppgtt; 1613 int err; 1614 1615 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 1616 if (!ppgtt) 1617 return ERR_PTR(-ENOMEM); 1618 1619 kref_init(&ppgtt->ref); 1620 1621 ppgtt->vm.i915 = i915; 1622 ppgtt->vm.dma = &i915->drm.pdev->dev; 1623 1624 ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ? 1625 1ULL << 48 : 1626 1ULL << 32; 1627 1628 /* From bdw, there is support for read-only pages in the PPGTT. */ 1629 ppgtt->vm.has_read_only = true; 1630 1631 i915_address_space_init(&ppgtt->vm, i915); 1632 1633 /* There are only few exceptions for gen >=6. chv and bxt. 1634 * And we are not sure about the latter so play safe for now. 
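	 * With pt_kmap_wc set, page-table pages are kept write-combining and
	 * are recycled through the WC stashes in vm_alloc_page() and
	 * vm_free_pages_release() above rather than being freed directly.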
1635 */ 1636 if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915)) 1637 ppgtt->vm.pt_kmap_wc = true; 1638 1639 err = gen8_init_scratch(&ppgtt->vm); 1640 if (err) 1641 goto err_free; 1642 1643 if (use_4lvl(&ppgtt->vm)) { 1644 err = setup_px(&ppgtt->vm, &ppgtt->pml4); 1645 if (err) 1646 goto err_scratch; 1647 1648 gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4); 1649 1650 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; 1651 ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; 1652 ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; 1653 } else { 1654 err = __pdp_init(&ppgtt->vm, &ppgtt->pdp); 1655 if (err) 1656 goto err_scratch; 1657 1658 if (intel_vgpu_active(i915)) { 1659 err = gen8_preallocate_top_level_pdp(ppgtt); 1660 if (err) { 1661 __pdp_fini(&ppgtt->pdp); 1662 goto err_scratch; 1663 } 1664 } 1665 1666 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; 1667 ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; 1668 ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl; 1669 } 1670 1671 if (intel_vgpu_active(i915)) 1672 gen8_ppgtt_notify_vgt(ppgtt, true); 1673 1674 ppgtt->vm.cleanup = gen8_ppgtt_cleanup; 1675 ppgtt->debug_dump = gen8_dump_ppgtt; 1676 1677 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; 1678 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; 1679 ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; 1680 ppgtt->vm.vma_ops.clear_pages = clear_pages; 1681 1682 return ppgtt; 1683 1684 err_scratch: 1685 gen8_free_scratch(&ppgtt->vm); 1686 err_free: 1687 kfree(ppgtt); 1688 return ERR_PTR(err); 1689 } 1690 1691 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) 1692 { 1693 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 1694 const gen6_pte_t scratch_pte = base->vm.scratch_pte; 1695 struct i915_page_table *pt; 1696 u32 pte, pde; 1697 1698 gen6_for_all_pdes(pt, &base->pd, pde) { 1699 gen6_pte_t *vaddr; 1700 1701 if (pt == base->vm.scratch_pt) 1702 continue; 1703 1704 if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { 1705 u32 expected = 1706 GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | 1707 GEN6_PDE_VALID; 1708 u32 pd_entry = readl(ppgtt->pd_addr + pde); 1709 1710 if (pd_entry != expected) 1711 seq_printf(m, 1712 "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", 1713 pde, 1714 pd_entry, 1715 expected); 1716 1717 seq_printf(m, "\tPDE: %x\n", pd_entry); 1718 } 1719 1720 vaddr = kmap_atomic_px(base->pd.page_table[pde]); 1721 for (pte = 0; pte < GEN6_PTES; pte += 4) { 1722 int i; 1723 1724 for (i = 0; i < 4; i++) 1725 if (vaddr[pte + i] != scratch_pte) 1726 break; 1727 if (i == 4) 1728 continue; 1729 1730 seq_printf(m, "\t\t(%03d, %04d) %08llx: ", 1731 pde, pte, 1732 (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); 1733 for (i = 0; i < 4; i++) { 1734 if (vaddr[pte + i] != scratch_pte) 1735 seq_printf(m, " %08x", vaddr[pte + i]); 1736 else 1737 seq_puts(m, " SCRATCH"); 1738 } 1739 seq_puts(m, "\n"); 1740 } 1741 kunmap_atomic(vaddr); 1742 } 1743 } 1744 1745 /* Write pde (index) from the page directory @pd to the page table @pt */ 1746 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, 1747 const unsigned int pde, 1748 const struct i915_page_table *pt) 1749 { 1750 /* Caller needs to make sure the write completes if necessary */ 1751 iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, 1752 ppgtt->pd_addr + pde); 1753 } 1754 1755 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) 1756 { 1757 struct intel_engine_cs *engine; 1758 u32 ecochk, ecobits; 1759 enum intel_engine_id id; 1760 1761 ecobits = I915_READ(GAC_ECO_BITS); 1762 
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); 1763 1764 ecochk = I915_READ(GAM_ECOCHK); 1765 if (IS_HASWELL(dev_priv)) { 1766 ecochk |= ECOCHK_PPGTT_WB_HSW; 1767 } else { 1768 ecochk |= ECOCHK_PPGTT_LLC_IVB; 1769 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; 1770 } 1771 I915_WRITE(GAM_ECOCHK, ecochk); 1772 1773 for_each_engine(engine, dev_priv, id) { 1774 /* GFX_MODE is per-ring on gen7+ */ 1775 I915_WRITE(RING_MODE_GEN7(engine), 1776 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1777 } 1778 } 1779 1780 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) 1781 { 1782 u32 ecochk, gab_ctl, ecobits; 1783 1784 ecobits = I915_READ(GAC_ECO_BITS); 1785 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 1786 ECOBITS_PPGTT_CACHE64B); 1787 1788 gab_ctl = I915_READ(GAB_CTL); 1789 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); 1790 1791 ecochk = I915_READ(GAM_ECOCHK); 1792 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 1793 1794 if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */ 1795 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1796 } 1797 1798 /* PPGTT support for Sandybdrige/Gen6 and later */ 1799 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 1800 u64 start, u64 length) 1801 { 1802 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); 1803 unsigned int first_entry = start / I915_GTT_PAGE_SIZE; 1804 unsigned int pde = first_entry / GEN6_PTES; 1805 unsigned int pte = first_entry % GEN6_PTES; 1806 unsigned int num_entries = length / I915_GTT_PAGE_SIZE; 1807 const gen6_pte_t scratch_pte = vm->scratch_pte; 1808 1809 while (num_entries) { 1810 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; 1811 const unsigned int end = min(pte + num_entries, GEN6_PTES); 1812 const unsigned int count = end - pte; 1813 gen6_pte_t *vaddr; 1814 1815 GEM_BUG_ON(pt == vm->scratch_pt); 1816 1817 num_entries -= count; 1818 1819 GEM_BUG_ON(count > pt->used_ptes); 1820 pt->used_ptes -= count; 1821 if (!pt->used_ptes) 1822 ppgtt->scan_for_unused_pt = true; 1823 1824 /* 1825 * Note that the hw doesn't support removing PDE on the fly 1826 * (they are cached inside the context with no means to 1827 * invalidate the cache), so we can only reset the PTE 1828 * entries back to scratch. 
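		 * Fully emptied page tables are therefore only flagged here
		 * (via scan_for_unused_pt) and actually freed later in
		 * pd_vma_unbind().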
1829 */ 1830 1831 vaddr = kmap_atomic_px(pt); 1832 do { 1833 vaddr[pte++] = scratch_pte; 1834 } while (pte < end); 1835 kunmap_atomic(vaddr); 1836 1837 pte = 0; 1838 } 1839 } 1840 1841 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, 1842 struct i915_vma *vma, 1843 enum i915_cache_level cache_level, 1844 u32 flags) 1845 { 1846 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1847 unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; 1848 unsigned act_pt = first_entry / GEN6_PTES; 1849 unsigned act_pte = first_entry % GEN6_PTES; 1850 const u32 pte_encode = vm->pte_encode(0, cache_level, flags); 1851 struct sgt_dma iter = sgt_dma(vma); 1852 gen6_pte_t *vaddr; 1853 1854 GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt); 1855 1856 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); 1857 do { 1858 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); 1859 1860 iter.dma += I915_GTT_PAGE_SIZE; 1861 if (iter.dma == iter.max) { 1862 iter.sg = __sg_next(iter.sg); 1863 if (!iter.sg) 1864 break; 1865 1866 iter.dma = sg_dma_address(iter.sg); 1867 iter.max = iter.dma + iter.sg->length; 1868 } 1869 1870 if (++act_pte == GEN6_PTES) { 1871 kunmap_atomic(vaddr); 1872 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]); 1873 act_pte = 0; 1874 } 1875 } while (1); 1876 kunmap_atomic(vaddr); 1877 1878 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 1879 } 1880 1881 static int gen6_alloc_va_range(struct i915_address_space *vm, 1882 u64 start, u64 length) 1883 { 1884 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); 1885 struct i915_page_table *pt; 1886 u64 from = start; 1887 unsigned int pde; 1888 bool flush = false; 1889 1890 gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { 1891 const unsigned int count = gen6_pte_count(start, length); 1892 1893 if (pt == vm->scratch_pt) { 1894 pt = alloc_pt(vm); 1895 if (IS_ERR(pt)) 1896 goto unwind_out; 1897 1898 gen6_initialize_pt(vm, pt); 1899 ppgtt->base.pd.page_table[pde] = pt; 1900 1901 if (i915_vma_is_bound(ppgtt->vma, 1902 I915_VMA_GLOBAL_BIND)) { 1903 gen6_write_pde(ppgtt, pde, pt); 1904 flush = true; 1905 } 1906 1907 GEM_BUG_ON(pt->used_ptes); 1908 } 1909 1910 pt->used_ptes += count; 1911 } 1912 1913 if (flush) { 1914 mark_tlbs_dirty(&ppgtt->base); 1915 gen6_ggtt_invalidate(ppgtt->base.vm.i915); 1916 } 1917 1918 return 0; 1919 1920 unwind_out: 1921 gen6_ppgtt_clear_range(vm, from, start - from); 1922 return -ENOMEM; 1923 } 1924 1925 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) 1926 { 1927 struct i915_address_space * const vm = &ppgtt->base.vm; 1928 struct i915_page_table *unused; 1929 u32 pde; 1930 int ret; 1931 1932 ret = setup_scratch_page(vm, __GFP_HIGHMEM); 1933 if (ret) 1934 return ret; 1935 1936 vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr, 1937 I915_CACHE_NONE, 1938 PTE_READ_ONLY); 1939 1940 vm->scratch_pt = alloc_pt(vm); 1941 if (IS_ERR(vm->scratch_pt)) { 1942 cleanup_scratch_page(vm); 1943 return PTR_ERR(vm->scratch_pt); 1944 } 1945 1946 gen6_initialize_pt(vm, vm->scratch_pt); 1947 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) 1948 ppgtt->base.pd.page_table[pde] = vm->scratch_pt; 1949 1950 return 0; 1951 } 1952 1953 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) 1954 { 1955 free_pt(vm, vm->scratch_pt); 1956 cleanup_scratch_page(vm); 1957 } 1958 1959 static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt) 1960 { 1961 struct i915_page_table *pt; 1962 u32 pde; 1963 1964 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) 1965 if (pt != 
ppgtt->base.vm.scratch_pt) 1966 free_pt(&ppgtt->base.vm, pt); 1967 } 1968 1969 static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1970 { 1971 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); 1972 1973 i915_vma_destroy(ppgtt->vma); 1974 1975 gen6_ppgtt_free_pd(ppgtt); 1976 gen6_ppgtt_free_scratch(vm); 1977 } 1978 1979 static int pd_vma_set_pages(struct i915_vma *vma) 1980 { 1981 vma->pages = ERR_PTR(-ENODEV); 1982 return 0; 1983 } 1984 1985 static void pd_vma_clear_pages(struct i915_vma *vma) 1986 { 1987 GEM_BUG_ON(!vma->pages); 1988 1989 vma->pages = NULL; 1990 } 1991 1992 static int pd_vma_bind(struct i915_vma *vma, 1993 enum i915_cache_level cache_level, 1994 u32 unused) 1995 { 1996 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); 1997 struct gen6_hw_ppgtt *ppgtt = vma->private; 1998 u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; 1999 struct i915_page_table *pt; 2000 unsigned int pde; 2001 2002 ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); 2003 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; 2004 2005 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) 2006 gen6_write_pde(ppgtt, pde, pt); 2007 2008 mark_tlbs_dirty(&ppgtt->base); 2009 gen6_ggtt_invalidate(ppgtt->base.vm.i915); 2010 2011 return 0; 2012 } 2013 2014 static void pd_vma_unbind(struct i915_vma *vma) 2015 { 2016 struct gen6_hw_ppgtt *ppgtt = vma->private; 2017 struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; 2018 struct i915_page_table *pt; 2019 unsigned int pde; 2020 2021 if (!ppgtt->scan_for_unused_pt) 2022 return; 2023 2024 /* Free all no longer used page tables */ 2025 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) { 2026 if (pt->used_ptes || pt == scratch_pt) 2027 continue; 2028 2029 free_pt(&ppgtt->base.vm, pt); 2030 ppgtt->base.pd.page_table[pde] = scratch_pt; 2031 } 2032 2033 ppgtt->scan_for_unused_pt = false; 2034 } 2035 2036 static const struct i915_vma_ops pd_vma_ops = { 2037 .set_pages = pd_vma_set_pages, 2038 .clear_pages = pd_vma_clear_pages, 2039 .bind_vma = pd_vma_bind, 2040 .unbind_vma = pd_vma_unbind, 2041 }; 2042 2043 static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) 2044 { 2045 struct drm_i915_private *i915 = ppgtt->base.vm.i915; 2046 struct i915_ggtt *ggtt = &i915->ggtt; 2047 struct i915_vma *vma; 2048 2049 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 2050 GEM_BUG_ON(size > ggtt->vm.total); 2051 2052 vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL); 2053 if (!vma) 2054 return ERR_PTR(-ENOMEM); 2055 2056 init_request_active(&vma->last_fence, NULL); 2057 2058 vma->vm = &ggtt->vm; 2059 vma->ops = &pd_vma_ops; 2060 vma->private = ppgtt; 2061 2062 vma->active = RB_ROOT; 2063 2064 vma->size = size; 2065 vma->fence_size = size; 2066 vma->flags = I915_VMA_GGTT; 2067 vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ 2068 2069 INIT_LIST_HEAD(&vma->obj_link); 2070 list_add(&vma->vm_link, &vma->vm->unbound_list); 2071 2072 return vma; 2073 } 2074 2075 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 2076 { 2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 2078 int err; 2079 2080 /* 2081 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt 2082 * which will be pinned into every active context. 2083 * (When vma->pin_count becomes atomic, I expect we will naturally 2084 * need a larger, unpacked, type and kill this redundancy.) 2085 */ 2086 if (ppgtt->pin_count++) 2087 return 0; 2088 2089 /* 2090 * PPGTT PDEs reside in the GGTT and consists of 512 entries. 
The 2091 * allocator works in address space sizes, so it's multiplied by page 2092 * size. We allocate at the top of the GTT to avoid fragmentation. 2093 */ 2094 err = i915_vma_pin(ppgtt->vma, 2095 0, GEN6_PD_ALIGN, 2096 PIN_GLOBAL | PIN_HIGH); 2097 if (err) 2098 goto unpin; 2099 2100 return 0; 2101 2102 unpin: 2103 ppgtt->pin_count = 0; 2104 return err; 2105 } 2106 2107 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) 2108 { 2109 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 2110 2111 GEM_BUG_ON(!ppgtt->pin_count); 2112 if (--ppgtt->pin_count) 2113 return; 2114 2115 i915_vma_unpin(ppgtt->vma); 2116 } 2117 2118 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) 2119 { 2120 struct i915_ggtt * const ggtt = &i915->ggtt; 2121 struct gen6_hw_ppgtt *ppgtt; 2122 int err; 2123 2124 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 2125 if (!ppgtt) 2126 return ERR_PTR(-ENOMEM); 2127 2128 kref_init(&ppgtt->base.ref); 2129 2130 ppgtt->base.vm.i915 = i915; 2131 ppgtt->base.vm.dma = &i915->drm.pdev->dev; 2132 2133 ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; 2134 2135 i915_address_space_init(&ppgtt->base.vm, i915); 2136 2137 ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; 2138 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; 2139 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; 2140 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; 2141 ppgtt->base.debug_dump = gen6_dump_ppgtt; 2142 2143 ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; 2144 ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; 2145 ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages; 2146 ppgtt->base.vm.vma_ops.clear_pages = clear_pages; 2147 2148 ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; 2149 2150 err = gen6_ppgtt_init_scratch(ppgtt); 2151 if (err) 2152 goto err_free; 2153 2154 ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); 2155 if (IS_ERR(ppgtt->vma)) { 2156 err = PTR_ERR(ppgtt->vma); 2157 goto err_scratch; 2158 } 2159 2160 return &ppgtt->base; 2161 2162 err_scratch: 2163 gen6_ppgtt_free_scratch(&ppgtt->base.vm); 2164 err_free: 2165 kfree(ppgtt); 2166 return ERR_PTR(err); 2167 } 2168 2169 static void gtt_write_workarounds(struct drm_i915_private *dev_priv) 2170 { 2171 /* This function is for gtt related workarounds. This function is 2172 * called on driver load and after a GPU reset, so you can place 2173 * workarounds here even if they get overwritten by GPU reset. 2174 */ 2175 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ 2176 if (IS_BROADWELL(dev_priv)) 2177 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); 2178 else if (IS_CHERRYVIEW(dev_priv)) 2179 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); 2180 else if (IS_GEN9_LP(dev_priv)) 2181 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); 2182 else if (INTEL_GEN(dev_priv) >= 9) 2183 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); 2184 2185 /* 2186 * To support 64K PTEs we need to first enable the use of the 2187 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical 2188 * mmio, otherwise the page-walker will simply ignore the IPS bit. This 2189 * shouldn't be needed after GEN10. 2190 * 2191 * 64K pages were first introduced from BDW+, although technically they 2192 * only *work* from gen9+. For pre-BDW we instead have the option for 2193 * 32K pages, but we don't currently have any support for it in our 2194 * driver. 
2195 */ 2196 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2197 INTEL_GEN(dev_priv) <= 10) 2198 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2199 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | 2200 GAMW_ECO_ENABLE_64K_IPS_FIELD); 2201 } 2202
2203 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) 2204 { 2205 gtt_write_workarounds(dev_priv); 2206
2207 if (IS_GEN6(dev_priv)) 2208 gen6_ppgtt_enable(dev_priv);
2209 else if (IS_GEN7(dev_priv)) 2210 gen7_ppgtt_enable(dev_priv); 2211
2212 return 0; 2213 } 2214
2215 static struct i915_hw_ppgtt * 2216 __hw_ppgtt_create(struct drm_i915_private *i915) 2217 {
2218 if (INTEL_GEN(i915) < 8) 2219 return gen6_ppgtt_create(i915);
2220 else 2221 return gen8_ppgtt_create(i915); 2222 } 2223
2224 struct i915_hw_ppgtt * 2225 i915_ppgtt_create(struct drm_i915_private *i915,
2226 struct drm_i915_file_private *fpriv) 2227 { 2228 struct i915_hw_ppgtt *ppgtt; 2229
2230 ppgtt = __hw_ppgtt_create(i915); 2231 if (IS_ERR(ppgtt)) 2232 return ppgtt; 2233
2234 ppgtt->vm.file = fpriv; 2235
2236 trace_i915_ppgtt_create(&ppgtt->vm); 2237
2238 return ppgtt; 2239 } 2240
2241 void i915_ppgtt_close(struct i915_address_space *vm) 2242 {
2243 GEM_BUG_ON(vm->closed); 2244 vm->closed = true; 2245 } 2246
2247 static void ppgtt_destroy_vma(struct i915_address_space *vm) 2248 {
2249 struct list_head *phases[] = { 2250 &vm->active_list, 2251 &vm->inactive_list,
2252 &vm->unbound_list, 2253 NULL, 2254 }, **phase; 2255
2256 vm->closed = true; 2257 for (phase = phases; *phase; phase++) { 2258 struct i915_vma *vma, *vn; 2259
2260 list_for_each_entry_safe(vma, vn, *phase, vm_link) 2261 i915_vma_destroy(vma); 2262 } 2263 } 2264
2265 void i915_ppgtt_release(struct kref *kref) 2266 {
2267 struct i915_hw_ppgtt *ppgtt = 2268 container_of(kref, struct i915_hw_ppgtt, ref); 2269
2270 trace_i915_ppgtt_release(&ppgtt->vm); 2271
2272 ppgtt_destroy_vma(&ppgtt->vm); 2273
2274 GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
2275 GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
2276 GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list)); 2277
2278 ppgtt->vm.cleanup(&ppgtt->vm); 2279 i915_address_space_fini(&ppgtt->vm); 2280 kfree(ppgtt); 2281 } 2282
2283 /* Certain Gen5 chipsets require idling the GPU before
2284 * unmapping anything from the GTT when VT-d is enabled. 2285 */
2286 static bool needs_idle_maps(struct drm_i915_private *dev_priv) 2287 {
2288 /* Query intel_iommu to see if we need the workaround. Presumably that
2289 * was loaded first. 2290 */
2291 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); 2292 } 2293
2294 static void gen6_check_faults(struct drm_i915_private *dev_priv) 2295 {
2296 struct intel_engine_cs *engine; 2297 enum intel_engine_id id; 2298 u32 fault; 2299
2300 for_each_engine(engine, dev_priv, id) {
2301 fault = I915_READ(RING_FAULT_REG(engine));
2302 if (fault & RING_FAULT_VALID) {
2303 DRM_DEBUG_DRIVER("Unexpected fault\n"
2304 "\tAddr: 0x%08lx\n"
2305 "\tAddress space: %s\n"
2306 "\tSource ID: %d\n"
2307 "\tType: %d\n",
2308 fault & PAGE_MASK,
2309 fault & RING_FAULT_GTTSEL_MASK ?
"GGTT" : "PPGTT", 2310 RING_FAULT_SRCID(fault), 2311 RING_FAULT_FAULT_TYPE(fault)); 2312 } 2313 } 2314 } 2315 2316 static void gen8_check_faults(struct drm_i915_private *dev_priv) 2317 { 2318 u32 fault = I915_READ(GEN8_RING_FAULT_REG); 2319 2320 if (fault & RING_FAULT_VALID) { 2321 u32 fault_data0, fault_data1; 2322 u64 fault_addr; 2323 2324 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); 2325 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); 2326 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | 2327 ((u64)fault_data0 << 12); 2328 2329 DRM_DEBUG_DRIVER("Unexpected fault\n" 2330 "\tAddr: 0x%08x_%08x\n" 2331 "\tAddress space: %s\n" 2332 "\tEngine ID: %d\n" 2333 "\tSource ID: %d\n" 2334 "\tType: %d\n", 2335 upper_32_bits(fault_addr), 2336 lower_32_bits(fault_addr), 2337 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", 2338 GEN8_RING_FAULT_ENGINE_ID(fault), 2339 RING_FAULT_SRCID(fault), 2340 RING_FAULT_FAULT_TYPE(fault)); 2341 } 2342 } 2343 2344 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) 2345 { 2346 /* From GEN8 onwards we only have one 'All Engine Fault Register' */ 2347 if (INTEL_GEN(dev_priv) >= 8) 2348 gen8_check_faults(dev_priv); 2349 else if (INTEL_GEN(dev_priv) >= 6) 2350 gen6_check_faults(dev_priv); 2351 else 2352 return; 2353 2354 i915_clear_error_registers(dev_priv); 2355 } 2356 2357 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) 2358 { 2359 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2360 2361 /* Don't bother messing with faults pre GEN6 as we have little 2362 * documentation supporting that it's a good idea. 2363 */ 2364 if (INTEL_GEN(dev_priv) < 6) 2365 return; 2366 2367 i915_check_and_clear_faults(dev_priv); 2368 2369 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); 2370 2371 i915_ggtt_invalidate(dev_priv); 2372 } 2373 2374 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, 2375 struct sg_table *pages) 2376 { 2377 do { 2378 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev, 2379 pages->sgl, pages->nents, 2380 PCI_DMA_BIDIRECTIONAL, 2381 DMA_ATTR_NO_WARN)) 2382 return 0; 2383 2384 /* If the DMA remap fails, one cause can be that we have 2385 * too many objects pinned in a small remapping table, 2386 * such as swiotlb. Incrementally purge all other objects and 2387 * try again - if there are no more pages to remove from 2388 * the DMA remapper, i915_gem_shrink will return 0. 
2389 */ 2390 GEM_BUG_ON(obj->mm.pages == pages); 2391 } while (i915_gem_shrink(to_i915(obj->base.dev), 2392 obj->base.size >> PAGE_SHIFT, NULL, 2393 I915_SHRINK_BOUND | 2394 I915_SHRINK_UNBOUND | 2395 I915_SHRINK_ACTIVE)); 2396 2397 return -ENOSPC; 2398 } 2399 2400 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) 2401 { 2402 writeq(pte, addr); 2403 } 2404 2405 static void gen8_ggtt_insert_page(struct i915_address_space *vm, 2406 dma_addr_t addr, 2407 u64 offset, 2408 enum i915_cache_level level, 2409 u32 unused) 2410 { 2411 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2412 gen8_pte_t __iomem *pte = 2413 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; 2414 2415 gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); 2416 2417 ggtt->invalidate(vm->i915); 2418 } 2419 2420 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 2421 struct i915_vma *vma, 2422 enum i915_cache_level level, 2423 u32 flags) 2424 { 2425 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2426 struct sgt_iter sgt_iter; 2427 gen8_pte_t __iomem *gtt_entries; 2428 const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); 2429 dma_addr_t addr; 2430 2431 /* 2432 * Note that we ignore PTE_READ_ONLY here. The caller must be careful 2433 * not to allow the user to override access to a read only page. 2434 */ 2435 2436 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; 2437 gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; 2438 for_each_sgt_dma(addr, sgt_iter, vma->pages) 2439 gen8_set_pte(gtt_entries++, pte_encode | addr); 2440 2441 /* 2442 * We want to flush the TLBs only after we're certain all the PTE 2443 * updates have finished. 2444 */ 2445 ggtt->invalidate(vm->i915); 2446 } 2447 2448 static void gen6_ggtt_insert_page(struct i915_address_space *vm, 2449 dma_addr_t addr, 2450 u64 offset, 2451 enum i915_cache_level level, 2452 u32 flags) 2453 { 2454 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2455 gen6_pte_t __iomem *pte = 2456 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; 2457 2458 iowrite32(vm->pte_encode(addr, level, flags), pte); 2459 2460 ggtt->invalidate(vm->i915); 2461 } 2462 2463 /* 2464 * Binds an object into the global gtt with the specified cache level. The object 2465 * will be accessible to the GPU via commands whose operands reference offsets 2466 * within the global GTT as well as accessible by the GPU through the GMADR 2467 * mapped BAR (dev_priv->mm.gtt->gtt). 2468 */ 2469 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, 2470 struct i915_vma *vma, 2471 enum i915_cache_level level, 2472 u32 flags) 2473 { 2474 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2475 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; 2476 unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; 2477 struct sgt_iter iter; 2478 dma_addr_t addr; 2479 for_each_sgt_dma(addr, iter, vma->pages) 2480 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); 2481 2482 /* 2483 * We want to flush the TLBs only after we're certain all the PTE 2484 * updates have finished. 
2485 */ 2486 ggtt->invalidate(vm->i915); 2487 } 2488
2489 static void nop_clear_range(struct i915_address_space *vm,
2490 u64 start, u64 length) 2491 { 2492 } 2493
2494 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2495 u64 start, u64 length) 2496 {
2497 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2498 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2499 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2500 const gen8_pte_t scratch_pte = vm->scratch_pte;
2501 gen8_pte_t __iomem *gtt_base =
2502 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2503 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2504 int i; 2505
2506 if (WARN(num_entries > max_entries,
2507 "First entry = %d; Num entries = %d (max=%d)\n",
2508 first_entry, num_entries, max_entries))
2509 num_entries = max_entries; 2510
2511 for (i = 0; i < num_entries; i++)
2512 gen8_set_pte(&gtt_base[i], scratch_pte); 2513 } 2514
2515 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) 2516 {
2517 struct drm_i915_private *dev_priv = vm->i915; 2518
2519 /* 2520 * Make sure the internal GAM fifo has been cleared of all GTT
2521 * writes before exiting stop_machine(). This guarantees that
2522 * any aperture accesses waiting to start in another process
2523 * cannot back up behind the GTT writes causing a hang.
2524 * The register can be any arbitrary GAM register. 2525 */
2526 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2527 } 2528
2529 struct insert_page { 2530 struct i915_address_space *vm; 2531 dma_addr_t addr;
2532 u64 offset; 2533 enum i915_cache_level level; 2534 }; 2535
2536 static int bxt_vtd_ggtt_insert_page__cb(void *_arg) 2537 {
2538 struct insert_page *arg = _arg; 2539
2540 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2541 bxt_vtd_ggtt_wa(arg->vm); 2542
2543 return 0; 2544 } 2545
2546 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2547 dma_addr_t addr, 2548 u64 offset,
2549 enum i915_cache_level level, 2550 u32 unused) 2551 {
2552 struct insert_page arg = { vm, addr, offset, level }; 2553
2554 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); 2555 } 2556
2557 struct insert_entries { 2558 struct i915_address_space *vm; 2559 struct i915_vma *vma;
2560 enum i915_cache_level level; 2561 u32 flags; 2562 }; 2563
2564 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) 2565 {
2566 struct insert_entries *arg = _arg; 2567
2568 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2569 bxt_vtd_ggtt_wa(arg->vm); 2570
2571 return 0; 2572 } 2573
2574 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2575 struct i915_vma *vma, 2576 enum i915_cache_level level,
2577 u32 flags) 2578 {
2579 struct insert_entries arg = { vm, vma, level, flags }; 2580
2581 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); 2582 } 2583
2584 struct clear_range { 2585 struct i915_address_space *vm; 2586 u64 start;
2587 u64 length; 2588 }; 2589
2590 static int bxt_vtd_ggtt_clear_range__cb(void *_arg) 2591 {
2592 struct clear_range *arg = _arg; 2593
2594 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2595 bxt_vtd_ggtt_wa(arg->vm); 2596
2597 return 0; 2598 } 2599
2600 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2601 u64 start, 2602 u64 length) 2603 {
2604 struct clear_range arg = { vm, start, length }; 2605
2606 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); 2607 } 2608
2609 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2610 u64 start, u64 length) 2611 { 2612
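/* (Descriptive note, added for clarity: like the gen8 variant above, this overwrites every PTE in the range with the scratch PTE via the GSM mapping, so further GPU accesses into the cleared range hit the scratch page.) */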
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2613 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2614 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2615 gen6_pte_t scratch_pte, __iomem *gtt_base =
2616 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2617 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2618 int i; 2619
2620 if (WARN(num_entries > max_entries,
2621 "First entry = %d; Num entries = %d (max=%d)\n",
2622 first_entry, num_entries, max_entries))
2623 num_entries = max_entries; 2624
2625 scratch_pte = vm->scratch_pte; 2626
2627 for (i = 0; i < num_entries; i++)
2628 iowrite32(scratch_pte, &gtt_base[i]); 2629 } 2630
2631 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2632 dma_addr_t addr, 2633 u64 offset,
2634 enum i915_cache_level cache_level, 2635 u32 unused) 2636 {
2637 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2638 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2639
2640 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); 2641 } 2642
2643 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2644 struct i915_vma *vma, 2645 enum i915_cache_level cache_level,
2646 u32 unused) 2647 {
2648 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2649 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2650
2651 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2652 flags); 2653 } 2654
2655 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2656 u64 start, u64 length) 2657 {
2658 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); 2659 } 2660
2661 static int ggtt_bind_vma(struct i915_vma *vma,
2662 enum i915_cache_level cache_level, 2663 u32 flags) 2664 {
2665 struct drm_i915_private *i915 = vma->vm->i915;
2666 struct drm_i915_gem_object *obj = vma->obj;
2667 u32 pte_flags; 2668
2669 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2670 pte_flags = 0;
2671 if (i915_gem_object_is_readonly(obj))
2672 pte_flags |= PTE_READ_ONLY; 2673
2674 intel_runtime_pm_get(i915);
2675 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2676 intel_runtime_pm_put(i915); 2677
2678 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 2679
2680 /* 2681 * Without aliasing PPGTT there's no difference between
2682 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2683 * upgrade to both bound if we bind either to avoid double-binding.
2684 */ 2685 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; 2686 2687 return 0; 2688 } 2689 2690 static void ggtt_unbind_vma(struct i915_vma *vma) 2691 { 2692 struct drm_i915_private *i915 = vma->vm->i915; 2693 2694 intel_runtime_pm_get(i915); 2695 vma->vm->clear_range(vma->vm, vma->node.start, vma->size); 2696 intel_runtime_pm_put(i915); 2697 } 2698 2699 static int aliasing_gtt_bind_vma(struct i915_vma *vma, 2700 enum i915_cache_level cache_level, 2701 u32 flags) 2702 { 2703 struct drm_i915_private *i915 = vma->vm->i915; 2704 u32 pte_flags; 2705 int ret; 2706 2707 /* Currently applicable only to VLV */ 2708 pte_flags = 0; 2709 if (i915_gem_object_is_readonly(vma->obj)) 2710 pte_flags |= PTE_READ_ONLY; 2711 2712 if (flags & I915_VMA_LOCAL_BIND) { 2713 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; 2714 2715 if (!(vma->flags & I915_VMA_LOCAL_BIND)) { 2716 ret = appgtt->vm.allocate_va_range(&appgtt->vm, 2717 vma->node.start, 2718 vma->size); 2719 if (ret) 2720 return ret; 2721 } 2722 2723 appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level, 2724 pte_flags); 2725 } 2726 2727 if (flags & I915_VMA_GLOBAL_BIND) { 2728 intel_runtime_pm_get(i915); 2729 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); 2730 intel_runtime_pm_put(i915); 2731 } 2732 2733 return 0; 2734 } 2735 2736 static void aliasing_gtt_unbind_vma(struct i915_vma *vma) 2737 { 2738 struct drm_i915_private *i915 = vma->vm->i915; 2739 2740 if (vma->flags & I915_VMA_GLOBAL_BIND) { 2741 intel_runtime_pm_get(i915); 2742 vma->vm->clear_range(vma->vm, vma->node.start, vma->size); 2743 intel_runtime_pm_put(i915); 2744 } 2745 2746 if (vma->flags & I915_VMA_LOCAL_BIND) { 2747 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm; 2748 2749 vm->clear_range(vm, vma->node.start, vma->size); 2750 } 2751 } 2752 2753 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, 2754 struct sg_table *pages) 2755 { 2756 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2757 struct device *kdev = &dev_priv->drm.pdev->dev; 2758 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2759 2760 if (unlikely(ggtt->do_idle_maps)) { 2761 if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { 2762 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); 2763 /* Wait a bit, in hopes it avoids the hang */ 2764 udelay(10); 2765 } 2766 } 2767 2768 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); 2769 } 2770 2771 static int ggtt_set_pages(struct i915_vma *vma) 2772 { 2773 int ret; 2774 2775 GEM_BUG_ON(vma->pages); 2776 2777 ret = i915_get_ggtt_vma_pages(vma); 2778 if (ret) 2779 return ret; 2780 2781 vma->page_sizes = vma->obj->mm.page_sizes; 2782 2783 return 0; 2784 } 2785 2786 static void i915_gtt_color_adjust(const struct drm_mm_node *node, 2787 unsigned long color, 2788 u64 *start, 2789 u64 *end) 2790 { 2791 if (node->allocated && node->color != color) 2792 *start += I915_GTT_PAGE_SIZE; 2793 2794 /* Also leave a space between the unallocated reserved node after the 2795 * GTT and any objects within the GTT, i.e. we use the color adjustment 2796 * to insert a guard page to prevent prefetches crossing over the 2797 * GTT boundary. 
2798 */ 2799 node = list_next_entry(node, node_list); 2800 if (node->color != color) 2801 *end -= I915_GTT_PAGE_SIZE; 2802 } 2803 2804 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) 2805 { 2806 struct i915_ggtt *ggtt = &i915->ggtt; 2807 struct i915_hw_ppgtt *ppgtt; 2808 int err; 2809 2810 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); 2811 if (IS_ERR(ppgtt)) 2812 return PTR_ERR(ppgtt); 2813 2814 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { 2815 err = -ENODEV; 2816 goto err_ppgtt; 2817 } 2818 2819 /* 2820 * Note we only pre-allocate as far as the end of the global 2821 * GTT. On 48b / 4-level page-tables, the difference is very, 2822 * very significant! We have to preallocate as GVT/vgpu does 2823 * not like the page directory disappearing. 2824 */ 2825 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); 2826 if (err) 2827 goto err_ppgtt; 2828 2829 i915->mm.aliasing_ppgtt = ppgtt; 2830 2831 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); 2832 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; 2833 2834 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); 2835 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; 2836 2837 return 0; 2838 2839 err_ppgtt: 2840 i915_ppgtt_put(ppgtt); 2841 return err; 2842 } 2843 2844 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) 2845 { 2846 struct i915_ggtt *ggtt = &i915->ggtt; 2847 struct i915_hw_ppgtt *ppgtt; 2848 2849 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); 2850 if (!ppgtt) 2851 return; 2852 2853 i915_ppgtt_put(ppgtt); 2854 2855 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 2856 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 2857 } 2858 2859 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) 2860 { 2861 /* Let GEM Manage all of the aperture. 2862 * 2863 * However, leave one page at the end still bound to the scratch page. 2864 * There are a number of places where the hardware apparently prefetches 2865 * past the end of the object, and we've seen multiple hangs with the 2866 * GPU head pointer stuck in a batchbuffer bound at the last page of the 2867 * aperture. One page should be enough to keep any prefetching inside 2868 * of the aperture. 2869 */ 2870 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2871 unsigned long hole_start, hole_end; 2872 struct drm_mm_node *entry; 2873 int ret; 2874 2875 /* 2876 * GuC requires all resources that we're sharing with it to be placed in 2877 * non-WOPCM memory. If GuC is not present or not in use we still need a 2878 * small bias as ring wraparound at offset 0 sometimes hangs. No idea 2879 * why. 
2880 */ 2881 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, 2882 intel_guc_reserved_gtt_size(&dev_priv->guc)); 2883 2884 ret = intel_vgt_balloon(dev_priv); 2885 if (ret) 2886 return ret; 2887 2888 /* Reserve a mappable slot for our lockless error capture */ 2889 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, 2890 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 2891 0, ggtt->mappable_end, 2892 DRM_MM_INSERT_LOW); 2893 if (ret) 2894 return ret; 2895 2896 /* Clear any non-preallocated blocks */ 2897 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { 2898 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 2899 hole_start, hole_end); 2900 ggtt->vm.clear_range(&ggtt->vm, hole_start, 2901 hole_end - hole_start); 2902 } 2903 2904 /* And finally clear the reserved guard page */ 2905 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); 2906 2907 if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { 2908 ret = i915_gem_init_aliasing_ppgtt(dev_priv); 2909 if (ret) 2910 goto err; 2911 } 2912 2913 return 0; 2914 2915 err: 2916 drm_mm_remove_node(&ggtt->error_capture); 2917 return ret; 2918 } 2919 2920 /** 2921 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization 2922 * @dev_priv: i915 device 2923 */ 2924 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) 2925 { 2926 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2927 struct i915_vma *vma, *vn; 2928 struct pagevec *pvec; 2929 2930 ggtt->vm.closed = true; 2931 2932 mutex_lock(&dev_priv->drm.struct_mutex); 2933 i915_gem_fini_aliasing_ppgtt(dev_priv); 2934 2935 GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); 2936 list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) 2937 WARN_ON(i915_vma_unbind(vma)); 2938 2939 if (drm_mm_node_allocated(&ggtt->error_capture)) 2940 drm_mm_remove_node(&ggtt->error_capture); 2941 2942 if (drm_mm_initialized(&ggtt->vm.mm)) { 2943 intel_vgt_deballoon(dev_priv); 2944 i915_address_space_fini(&ggtt->vm); 2945 } 2946 2947 ggtt->vm.cleanup(&ggtt->vm); 2948 2949 pvec = &dev_priv->mm.wc_stash.pvec; 2950 if (pvec->nr) { 2951 set_pages_array_wb(pvec->pages, pvec->nr); 2952 __pagevec_release(pvec); 2953 } 2954 2955 mutex_unlock(&dev_priv->drm.struct_mutex); 2956 2957 arch_phys_wc_del(ggtt->mtrr); 2958 io_mapping_fini(&ggtt->iomap); 2959 2960 i915_gem_cleanup_stolen(dev_priv); 2961 } 2962 2963 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 2964 { 2965 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; 2966 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; 2967 return snb_gmch_ctl << 20; 2968 } 2969 2970 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) 2971 { 2972 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; 2973 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 2974 if (bdw_gmch_ctl) 2975 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 2976 2977 #ifdef CONFIG_X86_32 2978 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ 2979 if (bdw_gmch_ctl > 4) 2980 bdw_gmch_ctl = 4; 2981 #endif 2982 2983 return bdw_gmch_ctl << 20; 2984 } 2985 2986 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) 2987 { 2988 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; 2989 gmch_ctrl &= SNB_GMCH_GGMS_MASK; 2990 2991 if (gmch_ctrl) 2992 return 1 << (20 + gmch_ctrl); 2993 2994 return 0; 2995 } 2996 2997 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) 2998 { 2999 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3000 struct pci_dev *pdev = dev_priv->drm.pdev; 3001 phys_addr_t phys_addr; 3002 int ret; 3003 3004 /* For Modern GENs the PTEs and register space are split in the BAR */ 
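/* (Added note: i.e. the lower half of the GTTMMADR BAR carries the register MMIO space while the upper half maps the GTT entries (the "GSM"), which is why the GSM base below is the BAR start plus half its length.) */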
3005 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; 3006 3007 /* 3008 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range 3009 * will be dropped. For WC mappings in general we have 64 byte burst 3010 * writes when the WC buffer is flushed, so we can't use it, but have to 3011 * resort to an uncached mapping. The WC issue is easily caught by the 3012 * readback check when writing GTT PTE entries. 3013 */ 3014 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) 3015 ggtt->gsm = ioremap_nocache(phys_addr, size); 3016 else 3017 ggtt->gsm = ioremap_wc(phys_addr, size); 3018 if (!ggtt->gsm) { 3019 DRM_ERROR("Failed to map the ggtt page table\n"); 3020 return -ENOMEM; 3021 } 3022 3023 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); 3024 if (ret) { 3025 DRM_ERROR("Scratch setup failed\n"); 3026 /* iounmap will also get called at remove, but meh */ 3027 iounmap(ggtt->gsm); 3028 return ret; 3029 } 3030 3031 ggtt->vm.scratch_pte = 3032 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr, 3033 I915_CACHE_NONE, 0); 3034 3035 return 0; 3036 } 3037 3038 static struct intel_ppat_entry * 3039 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) 3040 { 3041 struct intel_ppat_entry *entry = &ppat->entries[index]; 3042 3043 GEM_BUG_ON(index >= ppat->max_entries); 3044 GEM_BUG_ON(test_bit(index, ppat->used)); 3045 3046 entry->ppat = ppat; 3047 entry->value = value; 3048 kref_init(&entry->ref); 3049 set_bit(index, ppat->used); 3050 set_bit(index, ppat->dirty); 3051 3052 return entry; 3053 } 3054 3055 static void __free_ppat_entry(struct intel_ppat_entry *entry) 3056 { 3057 struct intel_ppat *ppat = entry->ppat; 3058 unsigned int index = entry - ppat->entries; 3059 3060 GEM_BUG_ON(index >= ppat->max_entries); 3061 GEM_BUG_ON(!test_bit(index, ppat->used)); 3062 3063 entry->value = ppat->clear_value; 3064 clear_bit(index, ppat->used); 3065 set_bit(index, ppat->dirty); 3066 } 3067 3068 /** 3069 * intel_ppat_get - get a usable PPAT entry 3070 * @i915: i915 device instance 3071 * @value: the PPAT value required by the caller 3072 * 3073 * The function tries to search if there is an existing PPAT entry which 3074 * matches with the required value. If perfectly matched, the existing PPAT 3075 * entry will be used. If only partially matched, it will try to check if 3076 * there is any available PPAT index. If yes, it will allocate a new PPAT 3077 * index for the required entry and update the HW. If not, the partially 3078 * matched entry will be used. 
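 *
 * The returned entry is reference counted and should be released with
 * intel_ppat_put() once the caller is done with it. A minimal usage sketch
 * follows (illustrative only; error handling and the surrounding caller
 * context are elided):
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... use (entry - entry->ppat->entries) as the PAT index in the PTE ...
 *	intel_ppat_put(entry);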
3079 */ 3080 const struct intel_ppat_entry * 3081 intel_ppat_get(struct drm_i915_private *i915, u8 value) 3082 { 3083 struct intel_ppat *ppat = &i915->ppat; 3084 struct intel_ppat_entry *entry = NULL; 3085 unsigned int scanned, best_score; 3086 int i; 3087 3088 GEM_BUG_ON(!ppat->max_entries); 3089 3090 scanned = best_score = 0; 3091 for_each_set_bit(i, ppat->used, ppat->max_entries) { 3092 unsigned int score; 3093 3094 score = ppat->match(ppat->entries[i].value, value); 3095 if (score > best_score) { 3096 entry = &ppat->entries[i]; 3097 if (score == INTEL_PPAT_PERFECT_MATCH) { 3098 kref_get(&entry->ref); 3099 return entry; 3100 } 3101 best_score = score; 3102 } 3103 scanned++; 3104 } 3105 3106 if (scanned == ppat->max_entries) { 3107 if (!entry) 3108 return ERR_PTR(-ENOSPC); 3109 3110 kref_get(&entry->ref); 3111 return entry; 3112 } 3113 3114 i = find_first_zero_bit(ppat->used, ppat->max_entries); 3115 entry = __alloc_ppat_entry(ppat, i, value); 3116 ppat->update_hw(i915); 3117 return entry; 3118 } 3119 3120 static void release_ppat(struct kref *kref) 3121 { 3122 struct intel_ppat_entry *entry = 3123 container_of(kref, struct intel_ppat_entry, ref); 3124 struct drm_i915_private *i915 = entry->ppat->i915; 3125 3126 __free_ppat_entry(entry); 3127 entry->ppat->update_hw(i915); 3128 } 3129 3130 /** 3131 * intel_ppat_put - put back the PPAT entry got from intel_ppat_get() 3132 * @entry: an intel PPAT entry 3133 * 3134 * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the 3135 * entry is dynamically allocated, its reference count will be decreased. Once 3136 * the reference count becomes into zero, the PPAT index becomes free again. 3137 */ 3138 void intel_ppat_put(const struct intel_ppat_entry *entry) 3139 { 3140 struct intel_ppat *ppat = entry->ppat; 3141 unsigned int index = entry - ppat->entries; 3142 3143 GEM_BUG_ON(!ppat->max_entries); 3144 3145 kref_put(&ppat->entries[index].ref, release_ppat); 3146 } 3147 3148 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) 3149 { 3150 struct intel_ppat *ppat = &dev_priv->ppat; 3151 int i; 3152 3153 for_each_set_bit(i, ppat->dirty, ppat->max_entries) { 3154 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); 3155 clear_bit(i, ppat->dirty); 3156 } 3157 } 3158 3159 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) 3160 { 3161 struct intel_ppat *ppat = &dev_priv->ppat; 3162 u64 pat = 0; 3163 int i; 3164 3165 for (i = 0; i < ppat->max_entries; i++) 3166 pat |= GEN8_PPAT(i, ppat->entries[i].value); 3167 3168 bitmap_clear(ppat->dirty, 0, ppat->max_entries); 3169 3170 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); 3171 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); 3172 } 3173 3174 static unsigned int bdw_private_pat_match(u8 src, u8 dst) 3175 { 3176 unsigned int score = 0; 3177 enum { 3178 AGE_MATCH = BIT(0), 3179 TC_MATCH = BIT(1), 3180 CA_MATCH = BIT(2), 3181 }; 3182 3183 /* Cache attribute has to be matched. */ 3184 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) 3185 return 0; 3186 3187 score |= CA_MATCH; 3188 3189 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) 3190 score |= TC_MATCH; 3191 3192 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) 3193 score |= AGE_MATCH; 3194 3195 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) 3196 return INTEL_PPAT_PERFECT_MATCH; 3197 3198 return score; 3199 } 3200 3201 static unsigned int chv_private_pat_match(u8 src, u8 dst) 3202 { 3203 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? 
3204 INTEL_PPAT_PERFECT_MATCH : 0; 3205 } 3206 3207 static void cnl_setup_private_ppat(struct intel_ppat *ppat) 3208 { 3209 ppat->max_entries = 8; 3210 ppat->update_hw = cnl_private_pat_update_hw; 3211 ppat->match = bdw_private_pat_match; 3212 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); 3213 3214 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); 3215 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); 3216 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); 3217 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); 3218 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); 3219 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); 3220 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); 3221 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 3222 } 3223 3224 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 3225 * bits. When using advanced contexts each context stores its own PAT, but 3226 * writing this data shouldn't be harmful even in those cases. */ 3227 static void bdw_setup_private_ppat(struct intel_ppat *ppat) 3228 { 3229 ppat->max_entries = 8; 3230 ppat->update_hw = bdw_private_pat_update_hw; 3231 ppat->match = bdw_private_pat_match; 3232 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); 3233 3234 if (!HAS_PPGTT(ppat->i915)) { 3235 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 3236 * so RTL will always use the value corresponding to 3237 * pat_sel = 000". 3238 * So let's disable cache for GGTT to avoid screen corruptions. 3239 * MOCS still can be used though. 3240 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work 3241 * before this patch, i.e. the same uncached + snooping access 3242 * like on gen6/7 seems to be in effect. 3243 * - So this just fixes blitter/render access. Again it looks 3244 * like it's not just uncached access, but uncached + snooping. 3245 * So we can still hold onto all our assumptions wrt cpu 3246 * clflushing on LLC machines. 3247 */ 3248 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); 3249 return; 3250 } 3251 3252 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ 3253 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */ 3254 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ 3255 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ 3256 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); 3257 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); 3258 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); 3259 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 3260 } 3261 3262 static void chv_setup_private_ppat(struct intel_ppat *ppat) 3263 { 3264 ppat->max_entries = 8; 3265 ppat->update_hw = bdw_private_pat_update_hw; 3266 ppat->match = chv_private_pat_match; 3267 ppat->clear_value = CHV_PPAT_SNOOP; 3268 3269 /* 3270 * Map WB on BDW to snooped on CHV. 3271 * 3272 * Only the snoop bit has meaning for CHV, the rest is 3273 * ignored. 
3274 * 3275 * The hardware will never snoop for certain types of accesses: 3276 * - CPU GTT (GMADR->GGTT->no snoop->memory) 3277 * - PPGTT page tables 3278 * - some other special cycles 3279 * 3280 * As with BDW, we also need to consider the following for GT accesses: 3281 * "For GGTT, there is NO pat_sel[2:0] from the entry, 3282 * so RTL will always use the value corresponding to 3283 * pat_sel = 000". 3284 * Which means we must set the snoop bit in PAT entry 0 3285 * in order to keep the global status page working. 3286 */ 3287 3288 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); 3289 __alloc_ppat_entry(ppat, 1, 0); 3290 __alloc_ppat_entry(ppat, 2, 0); 3291 __alloc_ppat_entry(ppat, 3, 0); 3292 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); 3293 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); 3294 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); 3295 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); 3296 } 3297 3298 static void gen6_gmch_remove(struct i915_address_space *vm) 3299 { 3300 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 3301 3302 iounmap(ggtt->gsm); 3303 cleanup_scratch_page(vm); 3304 } 3305 3306 static void setup_private_pat(struct drm_i915_private *dev_priv) 3307 { 3308 struct intel_ppat *ppat = &dev_priv->ppat; 3309 int i; 3310 3311 ppat->i915 = dev_priv; 3312 3313 if (INTEL_GEN(dev_priv) >= 10) 3314 cnl_setup_private_ppat(ppat); 3315 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) 3316 chv_setup_private_ppat(ppat); 3317 else 3318 bdw_setup_private_ppat(ppat); 3319 3320 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); 3321 3322 for_each_clear_bit(i, ppat->used, ppat->max_entries) { 3323 ppat->entries[i].value = ppat->clear_value; 3324 ppat->entries[i].ppat = ppat; 3325 set_bit(i, ppat->dirty); 3326 } 3327 3328 ppat->update_hw(dev_priv); 3329 } 3330 3331 static int gen8_gmch_probe(struct i915_ggtt *ggtt) 3332 { 3333 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3334 struct pci_dev *pdev = dev_priv->drm.pdev; 3335 unsigned int size; 3336 u16 snb_gmch_ctl; 3337 int err; 3338 3339 /* TODO: We're not aware of mappable constraints on gen8 yet */ 3340 ggtt->gmadr = 3341 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), 3342 pci_resource_len(pdev, 2)); 3343 ggtt->mappable_end = resource_size(&ggtt->gmadr); 3344 3345 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); 3346 if (!err) 3347 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); 3348 if (err) 3349 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); 3350 3351 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3352 if (IS_CHERRYVIEW(dev_priv)) 3353 size = chv_get_total_gtt_size(snb_gmch_ctl); 3354 else 3355 size = gen8_get_total_gtt_size(snb_gmch_ctl); 3356 3357 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; 3358 ggtt->vm.cleanup = gen6_gmch_remove; 3359 ggtt->vm.insert_page = gen8_ggtt_insert_page; 3360 ggtt->vm.clear_range = nop_clear_range; 3361 if (intel_scanout_needs_vtd_wa(dev_priv)) 3362 ggtt->vm.clear_range = gen8_ggtt_clear_range; 3363 3364 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; 3365 3366 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ 3367 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { 3368 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; 3369 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; 3370 if (ggtt->vm.clear_range != nop_clear_range) 3371 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; 3372 3373 /* Prevent recursively calling stop_machine() and deadlocks. 
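 * Error capture writes GGTT PTEs through insert_page while dumping
 * buffers, which with this workaround in place would mean calling
 * stop_machine() from the capture path; rather than risk that deadlock,
 * error capture is disabled below.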
*/ 3374 dev_info(dev_priv->drm.dev, 3375 "Disabling error capture for VT-d workaround\n"); 3376 i915_disable_error_state(dev_priv, -ENODEV); 3377 } 3378 3379 ggtt->invalidate = gen6_ggtt_invalidate; 3380 3381 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 3382 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 3383 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3384 ggtt->vm.vma_ops.clear_pages = clear_pages; 3385 3386 ggtt->vm.pte_encode = gen8_pte_encode; 3387 3388 setup_private_pat(dev_priv); 3389 3390 return ggtt_probe_common(ggtt, size); 3391 } 3392 3393 static int gen6_gmch_probe(struct i915_ggtt *ggtt) 3394 { 3395 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3396 struct pci_dev *pdev = dev_priv->drm.pdev; 3397 unsigned int size; 3398 u16 snb_gmch_ctl; 3399 int err; 3400 3401 ggtt->gmadr = 3402 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), 3403 pci_resource_len(pdev, 2)); 3404 ggtt->mappable_end = resource_size(&ggtt->gmadr); 3405 3406 /* 64/512MB is the current min/max we actually know of, but this is just 3407 * a coarse sanity check. 3408 */ 3409 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { 3410 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); 3411 return -ENXIO; 3412 } 3413 3414 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); 3415 if (!err) 3416 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); 3417 if (err) 3418 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); 3419 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3420 3421 size = gen6_get_total_gtt_size(snb_gmch_ctl); 3422 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; 3423 3424 ggtt->vm.clear_range = gen6_ggtt_clear_range; 3425 ggtt->vm.insert_page = gen6_ggtt_insert_page; 3426 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; 3427 ggtt->vm.cleanup = gen6_gmch_remove; 3428 3429 ggtt->invalidate = gen6_ggtt_invalidate; 3430 3431 if (HAS_EDRAM(dev_priv)) 3432 ggtt->vm.pte_encode = iris_pte_encode; 3433 else if (IS_HASWELL(dev_priv)) 3434 ggtt->vm.pte_encode = hsw_pte_encode; 3435 else if (IS_VALLEYVIEW(dev_priv)) 3436 ggtt->vm.pte_encode = byt_pte_encode; 3437 else if (INTEL_GEN(dev_priv) >= 7) 3438 ggtt->vm.pte_encode = ivb_pte_encode; 3439 else 3440 ggtt->vm.pte_encode = snb_pte_encode; 3441 3442 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 3443 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 3444 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3445 ggtt->vm.vma_ops.clear_pages = clear_pages; 3446 3447 return ggtt_probe_common(ggtt, size); 3448 } 3449 3450 static void i915_gmch_remove(struct i915_address_space *vm) 3451 { 3452 intel_gmch_remove(); 3453 } 3454 3455 static int i915_gmch_probe(struct i915_ggtt *ggtt) 3456 { 3457 struct drm_i915_private *dev_priv = ggtt->vm.i915; 3458 phys_addr_t gmadr_base; 3459 int ret; 3460 3461 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); 3462 if (!ret) { 3463 DRM_ERROR("failed to set up gmch\n"); 3464 return -EIO; 3465 } 3466 3467 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); 3468 3469 ggtt->gmadr = 3470 (struct resource) DEFINE_RES_MEM(gmadr_base, 3471 ggtt->mappable_end); 3472 3473 ggtt->do_idle_maps = needs_idle_maps(dev_priv); 3474 ggtt->vm.insert_page = i915_ggtt_insert_page; 3475 ggtt->vm.insert_entries = i915_ggtt_insert_entries; 3476 ggtt->vm.clear_range = i915_ggtt_clear_range; 3477 ggtt->vm.cleanup = i915_gmch_remove; 3478 3479 ggtt->invalidate = gmch_ggtt_invalidate; 3480 3481 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; 3482 
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; 3483 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; 3484 ggtt->vm.vma_ops.clear_pages = clear_pages; 3485 3486 if (unlikely(ggtt->do_idle_maps)) 3487 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 3488 3489 return 0; 3490 } 3491 3492 /** 3493 * i915_ggtt_probe_hw - Probe GGTT hardware location 3494 * @dev_priv: i915 device 3495 */ 3496 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) 3497 { 3498 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3499 int ret; 3500 3501 ggtt->vm.i915 = dev_priv; 3502 ggtt->vm.dma = &dev_priv->drm.pdev->dev; 3503 3504 if (INTEL_GEN(dev_priv) <= 5) 3505 ret = i915_gmch_probe(ggtt); 3506 else if (INTEL_GEN(dev_priv) < 8) 3507 ret = gen6_gmch_probe(ggtt); 3508 else 3509 ret = gen8_gmch_probe(ggtt); 3510 if (ret) 3511 return ret; 3512 3513 /* Trim the GGTT to fit the GuC mappable upper range (when enabled). 3514 * This is easier than doing range restriction on the fly, as we 3515 * currently don't have any bits spare to pass in this upper 3516 * restriction! 3517 */ 3518 if (USES_GUC(dev_priv)) { 3519 ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP); 3520 ggtt->mappable_end = 3521 min_t(u64, ggtt->mappable_end, ggtt->vm.total); 3522 } 3523 3524 if ((ggtt->vm.total - 1) >> 32) { 3525 DRM_ERROR("We never expected a Global GTT with more than 32bits" 3526 " of address space! Found %lldM!\n", 3527 ggtt->vm.total >> 20); 3528 ggtt->vm.total = 1ULL << 32; 3529 ggtt->mappable_end = 3530 min_t(u64, ggtt->mappable_end, ggtt->vm.total); 3531 } 3532 3533 if (ggtt->mappable_end > ggtt->vm.total) { 3534 DRM_ERROR("mappable aperture extends past end of GGTT," 3535 " aperture=%pa, total=%llx\n", 3536 &ggtt->mappable_end, ggtt->vm.total); 3537 ggtt->mappable_end = ggtt->vm.total; 3538 } 3539 3540 /* GMADR is the PCI mmio aperture into the global GTT. */ 3541 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); 3542 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); 3543 DRM_DEBUG_DRIVER("DSM size = %lluM\n", 3544 (u64)resource_size(&intel_graphics_stolen_res) >> 20); 3545 if (intel_vtd_active()) 3546 DRM_INFO("VT-d active for gfx access\n"); 3547 3548 return 0; 3549 } 3550 3551 /** 3552 * i915_ggtt_init_hw - Initialize GGTT hardware 3553 * @dev_priv: i915 device 3554 */ 3555 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) 3556 { 3557 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3558 int ret; 3559 3560 stash_init(&dev_priv->mm.wc_stash); 3561 3562 /* Note that we use page colouring to enforce a guard page at the 3563 * end of the address space. This is required as the CS may prefetch 3564 * beyond the end of the batch buffer, across the page boundary, 3565 * and beyond the end of the GTT if we do not provide a guard. 
3566 */ 3567 mutex_lock(&dev_priv->drm.struct_mutex); 3568 i915_address_space_init(&ggtt->vm, dev_priv); 3569 3570 ggtt->vm.is_ggtt = true; 3571 3572 /* Only VLV supports read-only GGTT mappings */ 3573 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); 3574 3575 if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv)) 3576 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; 3577 mutex_unlock(&dev_priv->drm.struct_mutex); 3578 3579 if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, 3580 dev_priv->ggtt.gmadr.start, 3581 dev_priv->ggtt.mappable_end)) { 3582 ret = -EIO; 3583 goto out_gtt_cleanup; 3584 } 3585 3586 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); 3587 3588 /* 3589 * Initialise stolen early so that we may reserve preallocated 3590 * objects for the BIOS to KMS transition. 3591 */ 3592 ret = i915_gem_init_stolen(dev_priv); 3593 if (ret) 3594 goto out_gtt_cleanup; 3595 3596 return 0; 3597 3598 out_gtt_cleanup: 3599 ggtt->vm.cleanup(&ggtt->vm); 3600 return ret; 3601 } 3602 3603 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) 3604 { 3605 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) 3606 return -EIO; 3607 3608 return 0; 3609 } 3610 3611 void i915_ggtt_enable_guc(struct drm_i915_private *i915) 3612 { 3613 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); 3614 3615 i915->ggtt.invalidate = guc_ggtt_invalidate; 3616 3617 i915_ggtt_invalidate(i915); 3618 } 3619 3620 void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3621 { 3622 /* XXX Temporary pardon for error unload */ 3623 if (i915->ggtt.invalidate == gen6_ggtt_invalidate) 3624 return; 3625 3626 /* We should only be called after i915_ggtt_enable_guc() */ 3627 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); 3628 3629 i915->ggtt.invalidate = gen6_ggtt_invalidate; 3630 3631 i915_ggtt_invalidate(i915); 3632 } 3633 3634 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) 3635 { 3636 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3637 struct i915_vma *vma, *vn; 3638 3639 i915_check_and_clear_faults(dev_priv); 3640 3641 /* First fill our portion of the GTT with scratch pages */ 3642 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); 3643 3644 ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ 3645 3646 /* clflush objects bound into the GGTT and rebind them. */ 3647 GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); 3648 list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) { 3649 struct drm_i915_gem_object *obj = vma->obj; 3650 3651 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) 3652 continue; 3653 3654 if (!i915_vma_unbind(vma)) 3655 continue; 3656 3657 WARN_ON(i915_vma_bind(vma, 3658 obj ? 
obj->cache_level : 0, 3659 PIN_UPDATE)); 3660 if (obj) 3661 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); 3662 } 3663 3664 ggtt->vm.closed = false; 3665 i915_ggtt_invalidate(dev_priv); 3666 3667 if (INTEL_GEN(dev_priv) >= 8) { 3668 struct intel_ppat *ppat = &dev_priv->ppat; 3669 3670 bitmap_set(ppat->dirty, 0, ppat->max_entries); 3671 dev_priv->ppat.update_hw(dev_priv); 3672 return; 3673 } 3674 } 3675 3676 static struct scatterlist * 3677 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, 3678 unsigned int width, unsigned int height, 3679 unsigned int stride, 3680 struct sg_table *st, struct scatterlist *sg) 3681 { 3682 unsigned int column, row; 3683 unsigned int src_idx; 3684 3685 for (column = 0; column < width; column++) { 3686 src_idx = stride * (height - 1) + column + offset; 3687 for (row = 0; row < height; row++) { 3688 st->nents++; 3689 /* We don't need the pages, but need to initialize 3690 * the entries so the sg list can be happily traversed. 3691 * The only thing we need are DMA addresses. 3692 */ 3693 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); 3694 sg_dma_address(sg) = 3695 i915_gem_object_get_dma_address(obj, src_idx); 3696 sg_dma_len(sg) = I915_GTT_PAGE_SIZE; 3697 sg = sg_next(sg); 3698 src_idx -= stride; 3699 } 3700 } 3701 3702 return sg; 3703 } 3704 3705 static noinline struct sg_table * 3706 intel_rotate_pages(struct intel_rotation_info *rot_info, 3707 struct drm_i915_gem_object *obj) 3708 { 3709 unsigned int size = intel_rotation_info_size(rot_info); 3710 struct sg_table *st; 3711 struct scatterlist *sg; 3712 int ret = -ENOMEM; 3713 int i; 3714 3715 /* Allocate target SG list. */ 3716 st = kmalloc(sizeof(*st), GFP_KERNEL); 3717 if (!st) 3718 goto err_st_alloc; 3719 3720 ret = sg_alloc_table(st, size, GFP_KERNEL); 3721 if (ret) 3722 goto err_sg_alloc; 3723 3724 st->nents = 0; 3725 sg = st->sgl; 3726 3727 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { 3728 sg = rotate_pages(obj, rot_info->plane[i].offset, 3729 rot_info->plane[i].width, rot_info->plane[i].height, 3730 rot_info->plane[i].stride, st, sg); 3731 } 3732 3733 return st; 3734 3735 err_sg_alloc: 3736 kfree(st); 3737 err_st_alloc: 3738 3739 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", 3740 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); 3741 3742 return ERR_PTR(ret); 3743 } 3744 3745 static noinline struct sg_table * 3746 intel_partial_pages(const struct i915_ggtt_view *view, 3747 struct drm_i915_gem_object *obj) 3748 { 3749 struct sg_table *st; 3750 struct scatterlist *sg, *iter; 3751 unsigned int count = view->partial.size; 3752 unsigned int offset; 3753 int ret = -ENOMEM; 3754 3755 st = kmalloc(sizeof(*st), GFP_KERNEL); 3756 if (!st) 3757 goto err_st_alloc; 3758 3759 ret = sg_alloc_table(st, count, GFP_KERNEL); 3760 if (ret) 3761 goto err_sg_alloc; 3762 3763 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); 3764 GEM_BUG_ON(!iter); 3765 3766 sg = st->sgl; 3767 st->nents = 0; 3768 do { 3769 unsigned int len; 3770 3771 len = min(iter->length - (offset << PAGE_SHIFT), 3772 count << PAGE_SHIFT); 3773 sg_set_page(sg, NULL, len, 0); 3774 sg_dma_address(sg) = 3775 sg_dma_address(iter) + (offset << PAGE_SHIFT); 3776 sg_dma_len(sg) = len; 3777 3778 st->nents++; 3779 count -= len >> PAGE_SHIFT; 3780 if (count == 0) { 3781 sg_mark_end(sg); 3782 i915_sg_trim(st); /* Drop any unused tail entries. 
*/ 3783 3784 return st; 3785 } 3786
3787 sg = __sg_next(sg); 3788 iter = __sg_next(iter); 3789 offset = 0;
3790 } while (1); 3791
3792 err_sg_alloc: 3793 kfree(st); 3794 err_st_alloc: 3795 return ERR_PTR(ret); 3796 } 3797
3798 static int 3799 i915_get_ggtt_vma_pages(struct i915_vma *vma) 3800 { 3801 int ret; 3802
3803 /* The vma->pages are only valid within the lifespan of the borrowed
3804 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3805 * must be the vma->pages. A simple rule is that vma->pages must only
3806 * be accessed when the obj->mm.pages are pinned. 3807 */
3808 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); 3809
3810 switch (vma->ggtt_view.type) { 3811 default:
3812 GEM_BUG_ON(vma->ggtt_view.type); 3813 /* fall through */
3814 case I915_GGTT_VIEW_NORMAL: 3815 vma->pages = vma->obj->mm.pages;
3816 return 0; 3817
3818 case I915_GGTT_VIEW_ROTATED: 3819 vma->pages =
3820 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); 3821 break; 3822
3823 case I915_GGTT_VIEW_PARTIAL:
3824 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3825 break; 3826 } 3827
3828 ret = 0; 3829 if (unlikely(IS_ERR(vma->pages))) {
3830 ret = PTR_ERR(vma->pages); 3831 vma->pages = NULL;
3832 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3833 vma->ggtt_view.type, ret); 3834 } 3835 return ret; 3836 } 3837
3838 /** 3839 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3840 * @vm: the &struct i915_address_space
3841 * @node: the &struct drm_mm_node (typically i915_vma.node)
3842 * @size: how much space to allocate inside the GTT,
3843 * must be #I915_GTT_PAGE_SIZE aligned
3844 * @offset: where to insert inside the GTT,
3845 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3846 * (@offset + @size) must fit within the address space
3847 * @color: color to apply to node, if this node is not from a VMA,
3848 * color must be #I915_COLOR_UNEVICTABLE
3849 * @flags: control search and eviction behaviour 3850 *
3851 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3852 * the address space (using @size and @color). If the @node does not fit, it
3853 * tries to evict any overlapping nodes from the GTT, including any
3854 * neighbouring nodes if the colors do not match (to ensure guard pages between
3855 * differing domains). See i915_gem_evict_for_node() for the gory details
3856 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3857 * evicting active overlapping objects, and any overlapping node that is pinned
3858 * or marked as unevictable will also result in failure. 3859 *
3860 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3861 * asked to wait for eviction and interrupted.
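 *
 * A minimal usage sketch (illustrative only; the address space, the chosen
 * size/offset and the required locking all stand in for the caller's real
 * values):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 *	...
 *	drm_mm_remove_node(&node);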
3862 */ 3863 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3864 struct drm_mm_node *node,
3865 u64 size, u64 offset, unsigned long color,
3866 unsigned int flags) 3867 { 3868 int err; 3869
3870 GEM_BUG_ON(!size);
3871 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3872 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3873 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3874 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3875 GEM_BUG_ON(drm_mm_node_allocated(node)); 3876
3877 node->size = size; 3878 node->start = offset; 3879 node->color = color; 3880
3881 err = drm_mm_reserve_node(&vm->mm, node);
3882 if (err != -ENOSPC) 3883 return err; 3884
3885 if (flags & PIN_NOEVICT) 3886 return -ENOSPC; 3887
3888 err = i915_gem_evict_for_node(vm, node, flags);
3889 if (err == 0) 3890 err = drm_mm_reserve_node(&vm->mm, node); 3891
3892 return err; 3893 } 3894
3895 static u64 random_offset(u64 start, u64 end, u64 len, u64 align) 3896 {
3897 u64 range, addr; 3898
3899 GEM_BUG_ON(range_overflows(start, len, end));
3900 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align)); 3901
3902 range = round_down(end - len, align) - round_up(start, align);
3903 if (range) { 3904 if (sizeof(unsigned long) == sizeof(u64)) {
3905 addr = get_random_long(); 3906 } else { 3907 addr = get_random_int();
3908 if (range > U32_MAX) { 3909 addr <<= 32;
3910 addr |= get_random_int(); 3911 } 3912 }
3913 div64_u64_rem(addr, range, &addr); 3914 start += addr; 3915 } 3916
3917 return round_up(start, align); 3918 } 3919
3920 /** 3921 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3922 * @vm: the &struct i915_address_space
3923 * @node: the &struct drm_mm_node (typically i915_vma.node)
3924 * @size: how much space to allocate inside the GTT,
3925 * must be #I915_GTT_PAGE_SIZE aligned
3926 * @alignment: required alignment of starting offset, may be 0 but
3927 * if specified, this must be a power-of-two and at least
3928 * #I915_GTT_MIN_ALIGNMENT
3929 * @color: color to apply to node
3930 * @start: start of any range restriction inside GTT (0 for all),
3931 * must be #I915_GTT_PAGE_SIZE aligned
3932 * @end: end of any range restriction inside GTT (U64_MAX for all),
3933 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3934 * @flags: control search and eviction behaviour 3935 *
3936 * i915_gem_gtt_insert() first searches for an available hole into which
3937 * it can insert the node. The hole address is aligned to @alignment and
3938 * its @size must then fit entirely within the [@start, @end] bounds. The
3939 * nodes on either side of the hole must match @color, or else a guard page
3940 * will be inserted between the two nodes (or the node evicted). If no
3941 * suitable hole is found, first a victim is randomly selected and tested
3942 * for eviction, and if that fails the LRU list of objects within the GTT
3943 * is scanned to find the first set of replacement nodes to create the hole.
3944 * Those old overlapping nodes are evicted from the GTT (and so must be
3945 * rebound before any future use). Any node that is currently pinned cannot
3946 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3947 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3948 * searching for an eviction candidate. See i915_gem_evict_something() for
3949 * the gory details on the eviction algorithm. 3950 *
3951 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3952 * asked to wait for eviction and interrupted.
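 *
 * A minimal usage sketch (illustrative only; struct_mutex must be held, and
 * "ggtt", "obj" and the chosen bounds/flags stand in for the caller's real
 * values; the caller also remains responsible for removing the node again):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node, obj->base.size, 0,
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->mappable_end, PIN_MAPPABLE);
 *	if (err)
 *		return err;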
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, and only then is the LRU list of objects within the GTT
 * scanned to find the first set of replacement nodes that create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between the different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif
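
/*
 * Illustrative sketch only, appended here and not part of the driver: a
 * hypothetical caller (example_insert_below_4g) of i915_gem_gtt_insert()
 * asking for any suitably sized hole in the low 4GiB of an address space,
 * preferring high addresses and falling back to the random-then-LRU
 * eviction strategy described above.
 */
#if 0
static int example_insert_below_4g(struct i915_address_space *vm,
				   struct drm_mm_node *node,
				   u64 size)
{
	lockdep_assert_held(&vm->i915->drm.struct_mutex);

	/*
	 * size, start and end must be I915_GTT_PAGE_SIZE aligned; passing
	 * alignment 0 lets drm_mm use its optimised zero-alignment path.
	 */
	return i915_gem_gtt_insert(vm, node, size, 0, I915_COLOR_UNEVICTABLE,
				   0, min_t(u64, vm->total, BIT_ULL(32)),
				   PIN_HIGH);
}
#endif
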