// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

/*
 * drm_mm color callback for the GGTT: pad allocations that neighbour a
 * differently-coloured node with one GTT page on either side, so the CS
 * prefetcher never crosses from one object straight into another.
 */
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

/*
 * One-time software setup of the GGTT address space: address-space init,
 * colouring, WC iomapping of the mappable aperture and fence registers.
 */
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active())
		return false;

	if (IS_GEN(i915, 5) && IS_MOBILE(i915))
		return true;

	if (IS_GEN(i915, 12))
		return true; /* XXX DMAR fault reason 7 */

	return false;
}

/*
 * Quiesce the GGTT for system suspend: evict what can be evicted, scrub
 * every PTE to the scratch page and flush the TLBs. Pinned VMAs (and those
 * still bound locally) keep their nodes so they can be rebound on resume.
 */
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;
	int open;

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);
	atomic_set(&ggtt->vm.open, open);

	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

/* Gen6 GGTT TLB invalidation via the GFX flush-control register. */
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

/* Invalidate both the GPU TLBs and the GuC's own TLB copy. */
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (INTEL_GEN(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

/* Legacy GMCH path: delegate the flush to the intel-gtt helper. */
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

/*
 * Encode a DMA address as a gen8+ GGTT PTE. Cache level and flags are
 * ignored here — the GGTT only needs the present bit.
 */
static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
				enum i915_cache_level level,
				u32 flags)
{
	return addr | _PAGE_PRESENT;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

/* Write a single gen8 GGTT PTE for @addr at @offset, then invalidate. */
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));

	ggtt->invalidate(ggtt);
}

/* Bind a whole VMA into the gen8 GGTT, scratch-filling any slack space. */
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

/* Write a single gen6 GGTT PTE, honouring the platform pte_encode hook. */
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

/* No-op clear for address spaces that never need PTE scrubbing. */
static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

/* Point every PTE in [start, start+length) at the scratch page (gen8+). */
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

/* Argument bundle for the stop_machine() single-page insert callback. */
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

/*
 * VT-d workaround (BXT): perform the PTE write under stop_machine() so no
 * concurrent aperture access can race the GAM fifo drain.
 */
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

/* Argument bundle for the stop_machine() multi-entry insert callback. */
struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};
3752c86e55dSMatthew Auld static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) 3762c86e55dSMatthew Auld { 3772c86e55dSMatthew Auld struct insert_entries *arg = _arg; 3782c86e55dSMatthew Auld 3792c86e55dSMatthew Auld gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); 3802c86e55dSMatthew Auld bxt_vtd_ggtt_wa(arg->vm); 3812c86e55dSMatthew Auld 3822c86e55dSMatthew Auld return 0; 3832c86e55dSMatthew Auld } 3842c86e55dSMatthew Auld 3852c86e55dSMatthew Auld static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, 3862c86e55dSMatthew Auld struct i915_vma *vma, 3872c86e55dSMatthew Auld enum i915_cache_level level, 3882c86e55dSMatthew Auld u32 flags) 3892c86e55dSMatthew Auld { 3902c86e55dSMatthew Auld struct insert_entries arg = { vm, vma, level, flags }; 3912c86e55dSMatthew Auld 3922c86e55dSMatthew Auld stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); 3932c86e55dSMatthew Auld } 3942c86e55dSMatthew Auld 3952c86e55dSMatthew Auld static void gen6_ggtt_clear_range(struct i915_address_space *vm, 3962c86e55dSMatthew Auld u64 start, u64 length) 3972c86e55dSMatthew Auld { 3982c86e55dSMatthew Auld struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 3992c86e55dSMatthew Auld unsigned int first_entry = start / I915_GTT_PAGE_SIZE; 4002c86e55dSMatthew Auld unsigned int num_entries = length / I915_GTT_PAGE_SIZE; 4012c86e55dSMatthew Auld gen6_pte_t scratch_pte, __iomem *gtt_base = 4022c86e55dSMatthew Auld (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 4032c86e55dSMatthew Auld const int max_entries = ggtt_total_entries(ggtt) - first_entry; 4042c86e55dSMatthew Auld int i; 4052c86e55dSMatthew Auld 4062c86e55dSMatthew Auld if (WARN(num_entries > max_entries, 4072c86e55dSMatthew Auld "First entry = %d; Num entries = %d (max=%d)\n", 4082c86e55dSMatthew Auld first_entry, num_entries, max_entries)) 4092c86e55dSMatthew Auld num_entries = max_entries; 4102c86e55dSMatthew Auld 41189351925SChris Wilson scratch_pte = vm->scratch[0]->encode; 4122c86e55dSMatthew 
Auld for (i = 0; i < num_entries; i++) 4132c86e55dSMatthew Auld iowrite32(scratch_pte, >t_base[i]); 4142c86e55dSMatthew Auld } 4152c86e55dSMatthew Auld 4162c86e55dSMatthew Auld static void i915_ggtt_insert_page(struct i915_address_space *vm, 4172c86e55dSMatthew Auld dma_addr_t addr, 4182c86e55dSMatthew Auld u64 offset, 4192c86e55dSMatthew Auld enum i915_cache_level cache_level, 4202c86e55dSMatthew Auld u32 unused) 4212c86e55dSMatthew Auld { 4222c86e55dSMatthew Auld unsigned int flags = (cache_level == I915_CACHE_NONE) ? 4232c86e55dSMatthew Auld AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 4242c86e55dSMatthew Auld 4252c86e55dSMatthew Auld intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); 4262c86e55dSMatthew Auld } 4272c86e55dSMatthew Auld 4282c86e55dSMatthew Auld static void i915_ggtt_insert_entries(struct i915_address_space *vm, 4292c86e55dSMatthew Auld struct i915_vma *vma, 4302c86e55dSMatthew Auld enum i915_cache_level cache_level, 4312c86e55dSMatthew Auld u32 unused) 4322c86e55dSMatthew Auld { 4332c86e55dSMatthew Auld unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
4342c86e55dSMatthew Auld AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 4352c86e55dSMatthew Auld 4362c86e55dSMatthew Auld intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, 4372c86e55dSMatthew Auld flags); 4382c86e55dSMatthew Auld } 4392c86e55dSMatthew Auld 4402c86e55dSMatthew Auld static void i915_ggtt_clear_range(struct i915_address_space *vm, 4412c86e55dSMatthew Auld u64 start, u64 length) 4422c86e55dSMatthew Auld { 4432c86e55dSMatthew Auld intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); 4442c86e55dSMatthew Auld } 4452c86e55dSMatthew Auld 446cd0452aaSChris Wilson static void ggtt_bind_vma(struct i915_address_space *vm, 447cd0452aaSChris Wilson struct i915_vm_pt_stash *stash, 44812b07256SChris Wilson struct i915_vma *vma, 4492c86e55dSMatthew Auld enum i915_cache_level cache_level, 4502c86e55dSMatthew Auld u32 flags) 4512c86e55dSMatthew Auld { 4522c86e55dSMatthew Auld struct drm_i915_gem_object *obj = vma->obj; 4532c86e55dSMatthew Auld u32 pte_flags; 4542c86e55dSMatthew Auld 455bf0840cdSChris Wilson if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK)) 456cd0452aaSChris Wilson return; 457bf0840cdSChris Wilson 4582c86e55dSMatthew Auld /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ 4592c86e55dSMatthew Auld pte_flags = 0; 4602c86e55dSMatthew Auld if (i915_gem_object_is_readonly(obj)) 4612c86e55dSMatthew Auld pte_flags |= PTE_READ_ONLY; 4622c86e55dSMatthew Auld 46312b07256SChris Wilson vm->insert_entries(vm, vma, cache_level, pte_flags); 4642c86e55dSMatthew Auld vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; 4652c86e55dSMatthew Auld } 4662c86e55dSMatthew Auld 46712b07256SChris Wilson static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) 4682c86e55dSMatthew Auld { 46912b07256SChris Wilson vm->clear_range(vm, vma->node.start, vma->size); 4702c86e55dSMatthew Auld } 4712c86e55dSMatthew Auld 4722c86e55dSMatthew Auld static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) 4732c86e55dSMatthew Auld { 
4742c86e55dSMatthew Auld u64 size; 4752c86e55dSMatthew Auld int ret; 4762c86e55dSMatthew Auld 47734bbfde6SDaniele Ceraolo Spurio if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) 4782c86e55dSMatthew Auld return 0; 4792c86e55dSMatthew Auld 4802c86e55dSMatthew Auld GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); 4812c86e55dSMatthew Auld size = ggtt->vm.total - GUC_GGTT_TOP; 4822c86e55dSMatthew Auld 4832c86e55dSMatthew Auld ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, 4842c86e55dSMatthew Auld GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, 4852c86e55dSMatthew Auld PIN_NOEVICT); 4862c86e55dSMatthew Auld if (ret) 48752ce7074SWambui Karuga drm_dbg(&ggtt->vm.i915->drm, 48852ce7074SWambui Karuga "Failed to reserve top of GGTT for GuC\n"); 4892c86e55dSMatthew Auld 4902c86e55dSMatthew Auld return ret; 4912c86e55dSMatthew Auld } 4922c86e55dSMatthew Auld 4932c86e55dSMatthew Auld static void ggtt_release_guc_top(struct i915_ggtt *ggtt) 4942c86e55dSMatthew Auld { 4952c86e55dSMatthew Auld if (drm_mm_node_allocated(&ggtt->uc_fw)) 4962c86e55dSMatthew Auld drm_mm_remove_node(&ggtt->uc_fw); 4972c86e55dSMatthew Auld } 4982c86e55dSMatthew Auld 4992c86e55dSMatthew Auld static void cleanup_init_ggtt(struct i915_ggtt *ggtt) 5002c86e55dSMatthew Auld { 5012c86e55dSMatthew Auld ggtt_release_guc_top(ggtt); 5022c86e55dSMatthew Auld if (drm_mm_node_allocated(&ggtt->error_capture)) 5032c86e55dSMatthew Auld drm_mm_remove_node(&ggtt->error_capture); 504742379c0SChris Wilson mutex_destroy(&ggtt->error_mutex); 5052c86e55dSMatthew Auld } 5062c86e55dSMatthew Auld 5072c86e55dSMatthew Auld static int init_ggtt(struct i915_ggtt *ggtt) 5082c86e55dSMatthew Auld { 5092c86e55dSMatthew Auld /* 5102c86e55dSMatthew Auld * Let GEM Manage all of the aperture. 5112c86e55dSMatthew Auld * 5122c86e55dSMatthew Auld * However, leave one page at the end still bound to the scratch page. 
5132c86e55dSMatthew Auld * There are a number of places where the hardware apparently prefetches 5142c86e55dSMatthew Auld * past the end of the object, and we've seen multiple hangs with the 5152c86e55dSMatthew Auld * GPU head pointer stuck in a batchbuffer bound at the last page of the 5162c86e55dSMatthew Auld * aperture. One page should be enough to keep any prefetching inside 5172c86e55dSMatthew Auld * of the aperture. 5182c86e55dSMatthew Auld */ 5192c86e55dSMatthew Auld unsigned long hole_start, hole_end; 5202c86e55dSMatthew Auld struct drm_mm_node *entry; 5212c86e55dSMatthew Auld int ret; 5222c86e55dSMatthew Auld 5232c86e55dSMatthew Auld /* 5242c86e55dSMatthew Auld * GuC requires all resources that we're sharing with it to be placed in 5252c86e55dSMatthew Auld * non-WOPCM memory. If GuC is not present or not in use we still need a 5262c86e55dSMatthew Auld * small bias as ring wraparound at offset 0 sometimes hangs. No idea 5272c86e55dSMatthew Auld * why. 5282c86e55dSMatthew Auld */ 5292c86e55dSMatthew Auld ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, 5302c86e55dSMatthew Auld intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); 5312c86e55dSMatthew Auld 5322c86e55dSMatthew Auld ret = intel_vgt_balloon(ggtt); 5332c86e55dSMatthew Auld if (ret) 5342c86e55dSMatthew Auld return ret; 5352c86e55dSMatthew Auld 536742379c0SChris Wilson mutex_init(&ggtt->error_mutex); 5372c86e55dSMatthew Auld if (ggtt->mappable_end) { 538489140b5SChris Wilson /* 539489140b5SChris Wilson * Reserve a mappable slot for our lockless error capture. 540489140b5SChris Wilson * 541489140b5SChris Wilson * We strongly prefer taking address 0x0 in order to protect 542489140b5SChris Wilson * other critical buffers against accidental overwrites, 543489140b5SChris Wilson * as writing to address 0 is a very common mistake. 544489140b5SChris Wilson * 545489140b5SChris Wilson * Since 0 may already be in use by the system (e.g. 
the BIOS 546489140b5SChris Wilson * framebuffer), we let the reservation fail quietly and hope 547489140b5SChris Wilson * 0 remains reserved always. 548489140b5SChris Wilson * 549489140b5SChris Wilson * If we fail to reserve 0, and then fail to find any space 550489140b5SChris Wilson * for an error-capture, remain silent. We can afford not 551489140b5SChris Wilson * to reserve an error_capture node as we have fallback 552489140b5SChris Wilson * paths, and we trust that 0 will remain reserved. However, 553489140b5SChris Wilson * the only likely reason for failure to insert is a driver 554489140b5SChris Wilson * bug, which we expect to cause other failures... 555489140b5SChris Wilson */ 556489140b5SChris Wilson ggtt->error_capture.size = I915_GTT_PAGE_SIZE; 557489140b5SChris Wilson ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; 558489140b5SChris Wilson if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) 559489140b5SChris Wilson drm_mm_insert_node_in_range(&ggtt->vm.mm, 5602c86e55dSMatthew Auld &ggtt->error_capture, 561489140b5SChris Wilson ggtt->error_capture.size, 0, 562489140b5SChris Wilson ggtt->error_capture.color, 5632c86e55dSMatthew Auld 0, ggtt->mappable_end, 5642c86e55dSMatthew Auld DRM_MM_INSERT_LOW); 5652c86e55dSMatthew Auld } 566489140b5SChris Wilson if (drm_mm_node_allocated(&ggtt->error_capture)) 567489140b5SChris Wilson drm_dbg(&ggtt->vm.i915->drm, 568489140b5SChris Wilson "Reserved GGTT:[%llx, %llx] for use by error capture\n", 569489140b5SChris Wilson ggtt->error_capture.start, 570489140b5SChris Wilson ggtt->error_capture.start + ggtt->error_capture.size); 5712c86e55dSMatthew Auld 5722c86e55dSMatthew Auld /* 5732c86e55dSMatthew Auld * The upper portion of the GuC address space has a sizeable hole 5742c86e55dSMatthew Auld * (several MB) that is inaccessible by GuC. Reserve this range within 5752c86e55dSMatthew Auld * GGTT as it can comfortably hold GuC/HuC firmware images. 
 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

/*
 * Bind a vma when an aliasing ppGTT shadows the global GTT: the LOCAL
 * half of the binding goes through the alias ppGTT, the GLOBAL half
 * through the GGTT's own insert_entries. Both may be requested in one
 * call via @flags.
 */
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	/* NOTE(review): pte_flags is only consumed by the GLOBAL half here. */
	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

/*
 * Inverse of aliasing_gtt_bind_vma(): tear down whichever halves
 * (global and/or aliasing-local) are currently bound.
 */
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

/*
 * Create the aliasing ppGTT that mirrors the whole GGTT range and
 * redirect the GGTT vma_ops at the aliasing bind/unbind helpers.
 * All page directories covering the GGTT are allocated and pinned
 * up front (see comment below), so the ppGTT is fully populated
 * before it is published via ggtt->alias.
 */
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* The alias must be able to cover every GGTT address. */
	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	/* Swap in the aliasing vma_ops; they must still be the defaults. */
	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	/* Any pages left over in the stash are returned here. */
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

/*
 * Drop the aliasing ppGTT (if any) and restore the plain GGTT
 * bind/unbind vma_ops. Safe to call when no alias was created.
 */
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret =
	    init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	/*
	 * The aliasing ppGTT is optional: failure to create it is not
	 * fatal, we simply fall back to the bare GGTT (hence return 0
	 * below even when init_aliasing_ppgtt() failed).
	 */
	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}

/*
 * Full teardown of the GGTT: unbind every vma, release the reserved
 * nodes (error capture, GuC top, vgpu balloon), then unwind the
 * hardware mapping set up by ggtt_init_hw()/ggtt_probe_common().
 */
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	/* Mark the vm closed so unbinds skip rewriting PTEs. */
	atomic_set(&ggtt->vm.open, 0);

	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	/* Platform-specific teardown (iounmap, scratch, gmch). */
	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/*
 * Decode the GGTT size in bytes from the SNB GMCH control word:
 * the GGMS field counts MiB directly (hence << 20).
 */
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

/*
 * Decode the GGTT size from the BDW GMCH control word: here the
 * field is an exponent, i.e. size = (1 << GGMS) MiB (0 == disabled).
 */
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

/* CHV variant: SNB field layout but BDW-style exponent encoding. */
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

/*
 * Shared tail of the gen6+/gen8+ probes: map the GTT page-table half
 * of BAR 0 (the "GSM"), create the scratch page, and precompute its
 * PTE encoding used for clearing ranges.
 */
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	/* Cache the scratch PTE; written wholesale by clear_range. */
	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, 0);

	return 0;
}

/*
 * vma_ops.set_pages for GGTT vmas: build the (possibly rotated or
 * remapped) page list for the vma and inherit the object's page sizes.
 */
int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

/* vm.cleanup for gen6+: undo ggtt_probe_common(). */
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

/* Helper: describe a PCI BAR as a struct resource. */
static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

/*
 * Probe the GGTT on gen8+ hardware: read the size from PCI config
 * space and populate the vm vfuncs, including the VT-d / CHV
 * stop_machine()-serialised variants where required.
 */
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	/* size is bytes of PTE space; each PTE maps one GTT page. */
	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;
	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(i915) ||
	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

/*
 * Gen6 (SNB) PTE encoding: address + valid bit, cacheability selected
 * from the requested cache level. @flags is unused on this platform.
 */
static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

/*
 * Gen7 (IVB) PTE encoding: as SNB, but L3+LLC gets its own
 * cacheability bits.
 */
static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

/*
 * Byt (VLV) PTE encoding: the only platform honouring PTE_READ_ONLY
 * (writeable unless the flag is set); cached levels snoop the CPU.
 */
static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

/* HSW PTE encoding: any cached level maps to WB-LLC age 3. */
static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

/*
 * HSW-with-eDRAM ("Iris") PTE encoding: distinguishes write-through
 * from write-back against the embedded DRAM cache.
 */
static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

/*
 * Probe the GGTT on gen6/gen7 hardware and select the per-platform
 * PTE encoder.
 */
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	/* Pick the PTE encoder matching this platform's cache bits. */
	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	return ggtt_probe_common(ggtt, size);
}

/* vm.cleanup for gen5 and earlier: handled entirely by intel-gtt. */
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

/*
 * Probe the GGTT on gen5 and earlier via the intel-gtt (gmch) helper
 * module, which owns the PTE format on those platforms.
 */
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	/* NB: intel_gmch_probe() returns non-zero on success. */
	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

/*
 * Dispatch to the generation-specific probe, then sanity-check the
 * reported sizes: clamp the GGTT to 32 bits of address space and the
 * mappable aperture to the GGTT's total.
 */
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;

	if (INTEL_GEN(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
	if (ret)
		return ret;

	if (intel_vtd_active())
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

/* Enable the legacy (pre-gen6) GTT via intel-gtt; no-op on gen6+. */
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

/* Switch GGTT invalidation over to the GuC-aware flavour. */
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/* Undo i915_ggtt_enable_guc(); tolerates being called twice. */
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/*
 * Restore the GGTT after suspend: scrub the whole range to scratch,
 * then rewrite the PTEs for every vma that was bound before suspend.
 * Runs single-threaded during resume, hence the exclusive-access
 * shortcuts below.
 */
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (INTEL_GEN(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}

/*
 * Emit scatterlist entries for one plane of a 90/270-degree rotated
 * view: walk the source tiles column-major, bottom row first, so the
 * resulting GTT-linear order corresponds to the rotated image.
 */
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		/* Start at the bottom of this column and walk upwards. */
		src_idx = stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	/* Return the next free entry for the caller's next plane. */
	return sg;
}

/*
 * Build a scatterlist describing the rotated view of @obj, one
 * rotate_pages() pass per plane. On failure returns an ERR_PTR;
 * the partially built table is freed.
 */
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

/*
 * Emit scatterlist entries for one plane of a remapped (de-tiled /
 * re-strided) view: copy @width pages per row, coalescing physically
 * contiguous runs, then skip (stride - width) pages to the next row.
 */
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	    unsigned int width, unsigned int height,
	    unsigned int stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
13382c86e55dSMatthew Auld */ 13392c86e55dSMatthew Auld 13402c86e55dSMatthew Auld addr = i915_gem_object_get_dma_address_len(obj, offset, &length); 13412c86e55dSMatthew Auld 13422c86e55dSMatthew Auld length = min(left, length); 13432c86e55dSMatthew Auld 13442c86e55dSMatthew Auld st->nents++; 13452c86e55dSMatthew Auld 13462c86e55dSMatthew Auld sg_set_page(sg, NULL, length, 0); 13472c86e55dSMatthew Auld sg_dma_address(sg) = addr; 13482c86e55dSMatthew Auld sg_dma_len(sg) = length; 13492c86e55dSMatthew Auld sg = sg_next(sg); 13502c86e55dSMatthew Auld 13512c86e55dSMatthew Auld offset += length / I915_GTT_PAGE_SIZE; 13522c86e55dSMatthew Auld left -= length; 13532c86e55dSMatthew Auld } 13542c86e55dSMatthew Auld 13552c86e55dSMatthew Auld offset += stride - width; 13562c86e55dSMatthew Auld } 13572c86e55dSMatthew Auld 13582c86e55dSMatthew Auld return sg; 13592c86e55dSMatthew Auld } 13602c86e55dSMatthew Auld 13612c86e55dSMatthew Auld static noinline struct sg_table * 13622c86e55dSMatthew Auld intel_remap_pages(struct intel_remapped_info *rem_info, 13632c86e55dSMatthew Auld struct drm_i915_gem_object *obj) 13642c86e55dSMatthew Auld { 13652c86e55dSMatthew Auld unsigned int size = intel_remapped_info_size(rem_info); 136652ce7074SWambui Karuga struct drm_i915_private *i915 = to_i915(obj->base.dev); 13672c86e55dSMatthew Auld struct sg_table *st; 13682c86e55dSMatthew Auld struct scatterlist *sg; 13692c86e55dSMatthew Auld int ret = -ENOMEM; 13702c86e55dSMatthew Auld int i; 13712c86e55dSMatthew Auld 13722c86e55dSMatthew Auld /* Allocate target SG list. 
*/ 13732c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL); 13742c86e55dSMatthew Auld if (!st) 13752c86e55dSMatthew Auld goto err_st_alloc; 13762c86e55dSMatthew Auld 13772c86e55dSMatthew Auld ret = sg_alloc_table(st, size, GFP_KERNEL); 13782c86e55dSMatthew Auld if (ret) 13792c86e55dSMatthew Auld goto err_sg_alloc; 13802c86e55dSMatthew Auld 13812c86e55dSMatthew Auld st->nents = 0; 13822c86e55dSMatthew Auld sg = st->sgl; 13832c86e55dSMatthew Auld 13842c86e55dSMatthew Auld for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { 13852c86e55dSMatthew Auld sg = remap_pages(obj, rem_info->plane[i].offset, 13862c86e55dSMatthew Auld rem_info->plane[i].width, rem_info->plane[i].height, 13872c86e55dSMatthew Auld rem_info->plane[i].stride, st, sg); 13882c86e55dSMatthew Auld } 13892c86e55dSMatthew Auld 13902c86e55dSMatthew Auld i915_sg_trim(st); 13912c86e55dSMatthew Auld 13922c86e55dSMatthew Auld return st; 13932c86e55dSMatthew Auld 13942c86e55dSMatthew Auld err_sg_alloc: 13952c86e55dSMatthew Auld kfree(st); 13962c86e55dSMatthew Auld err_st_alloc: 13972c86e55dSMatthew Auld 139852ce7074SWambui Karuga drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", 139952ce7074SWambui Karuga obj->base.size, rem_info->plane[0].width, 140052ce7074SWambui Karuga rem_info->plane[0].height, size); 14012c86e55dSMatthew Auld 14022c86e55dSMatthew Auld return ERR_PTR(ret); 14032c86e55dSMatthew Auld } 14042c86e55dSMatthew Auld 14052c86e55dSMatthew Auld static noinline struct sg_table * 14062c86e55dSMatthew Auld intel_partial_pages(const struct i915_ggtt_view *view, 14072c86e55dSMatthew Auld struct drm_i915_gem_object *obj) 14082c86e55dSMatthew Auld { 14092c86e55dSMatthew Auld struct sg_table *st; 14102c86e55dSMatthew Auld struct scatterlist *sg, *iter; 14112c86e55dSMatthew Auld unsigned int count = view->partial.size; 14122c86e55dSMatthew Auld unsigned int offset; 14132c86e55dSMatthew Auld int ret = -ENOMEM; 14142c86e55dSMatthew Auld 14152c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL); 14162c86e55dSMatthew Auld if (!st) 14172c86e55dSMatthew Auld goto err_st_alloc; 14182c86e55dSMatthew Auld 14192c86e55dSMatthew Auld ret = sg_alloc_table(st, count, GFP_KERNEL); 14202c86e55dSMatthew Auld if (ret) 14212c86e55dSMatthew Auld goto err_sg_alloc; 14222c86e55dSMatthew Auld 1423934941edSTvrtko Ursulin iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); 14242c86e55dSMatthew Auld GEM_BUG_ON(!iter); 14252c86e55dSMatthew Auld 14262c86e55dSMatthew Auld sg = st->sgl; 14272c86e55dSMatthew Auld st->nents = 0; 14282c86e55dSMatthew Auld do { 14292c86e55dSMatthew Auld unsigned int len; 14302c86e55dSMatthew Auld 1431934941edSTvrtko Ursulin len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), 14322c86e55dSMatthew Auld count << PAGE_SHIFT); 14332c86e55dSMatthew Auld sg_set_page(sg, NULL, len, 0); 14342c86e55dSMatthew Auld sg_dma_address(sg) = 14352c86e55dSMatthew Auld sg_dma_address(iter) + (offset << PAGE_SHIFT); 14362c86e55dSMatthew Auld sg_dma_len(sg) = len; 14372c86e55dSMatthew Auld 14382c86e55dSMatthew Auld st->nents++; 14392c86e55dSMatthew Auld count -= len >> PAGE_SHIFT; 14402c86e55dSMatthew 
Auld if (count == 0) { 14412c86e55dSMatthew Auld sg_mark_end(sg); 14422c86e55dSMatthew Auld i915_sg_trim(st); /* Drop any unused tail entries. */ 14432c86e55dSMatthew Auld 14442c86e55dSMatthew Auld return st; 14452c86e55dSMatthew Auld } 14462c86e55dSMatthew Auld 14472c86e55dSMatthew Auld sg = __sg_next(sg); 14482c86e55dSMatthew Auld iter = __sg_next(iter); 14492c86e55dSMatthew Auld offset = 0; 14502c86e55dSMatthew Auld } while (1); 14512c86e55dSMatthew Auld 14522c86e55dSMatthew Auld err_sg_alloc: 14532c86e55dSMatthew Auld kfree(st); 14542c86e55dSMatthew Auld err_st_alloc: 14552c86e55dSMatthew Auld return ERR_PTR(ret); 14562c86e55dSMatthew Auld } 14572c86e55dSMatthew Auld 14582c86e55dSMatthew Auld static int 14592c86e55dSMatthew Auld i915_get_ggtt_vma_pages(struct i915_vma *vma) 14602c86e55dSMatthew Auld { 14612c86e55dSMatthew Auld int ret; 14622c86e55dSMatthew Auld 14632c86e55dSMatthew Auld /* 14642c86e55dSMatthew Auld * The vma->pages are only valid within the lifespan of the borrowed 14652c86e55dSMatthew Auld * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so 14662c86e55dSMatthew Auld * must be the vma->pages. A simple rule is that vma->pages must only 14672c86e55dSMatthew Auld * be accessed when the obj->mm.pages are pinned. 14682c86e55dSMatthew Auld */ 14692c86e55dSMatthew Auld GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); 14702c86e55dSMatthew Auld 14712c86e55dSMatthew Auld switch (vma->ggtt_view.type) { 14722c86e55dSMatthew Auld default: 14732c86e55dSMatthew Auld GEM_BUG_ON(vma->ggtt_view.type); 1474df561f66SGustavo A. R. 
Silva fallthrough; 14752c86e55dSMatthew Auld case I915_GGTT_VIEW_NORMAL: 14762c86e55dSMatthew Auld vma->pages = vma->obj->mm.pages; 14772c86e55dSMatthew Auld return 0; 14782c86e55dSMatthew Auld 14792c86e55dSMatthew Auld case I915_GGTT_VIEW_ROTATED: 14802c86e55dSMatthew Auld vma->pages = 14812c86e55dSMatthew Auld intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); 14822c86e55dSMatthew Auld break; 14832c86e55dSMatthew Auld 14842c86e55dSMatthew Auld case I915_GGTT_VIEW_REMAPPED: 14852c86e55dSMatthew Auld vma->pages = 14862c86e55dSMatthew Auld intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); 14872c86e55dSMatthew Auld break; 14882c86e55dSMatthew Auld 14892c86e55dSMatthew Auld case I915_GGTT_VIEW_PARTIAL: 14902c86e55dSMatthew Auld vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); 14912c86e55dSMatthew Auld break; 14922c86e55dSMatthew Auld } 14932c86e55dSMatthew Auld 14942c86e55dSMatthew Auld ret = 0; 14952c86e55dSMatthew Auld if (IS_ERR(vma->pages)) { 14962c86e55dSMatthew Auld ret = PTR_ERR(vma->pages); 14972c86e55dSMatthew Auld vma->pages = NULL; 149852ce7074SWambui Karuga drm_err(&vma->vm->i915->drm, 149952ce7074SWambui Karuga "Failed to get pages for VMA view type %u (%d)!\n", 15002c86e55dSMatthew Auld vma->ggtt_view.type, ret); 15012c86e55dSMatthew Auld } 15022c86e55dSMatthew Auld return ret; 15032c86e55dSMatthew Auld } 1504