// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

/*
 * drm_mm color-adjust callback for the GGTT: shrink the usable [start, end)
 * hole so that a guard page separates nodes of differing color.
 */
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	/* Guard page between this hole and a differently-colored predecessor. */
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

/*
 * Common GGTT address-space setup: init the vm, wire up platform quirks
 * (read-only support, color adjust), map the aperture WC and init fences.
 * Returns 0 or -EIO if the aperture io_mapping cannot be established.
 */
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	/* Guard pages are only needed when there is no LLC/PPGTT isolation. */
	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			/* Undo i915_address_space_init() before bailing. */
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
}

/*
 * Quiesce the GGTT for suspend: evict unpinned, non-global VMA, then wipe
 * all PTEs so nothing dangles across the power transition. vm.open is
 * temporarily zeroed so unbinding does not rewrite PTEs we are about to
 * clear wholesale.
 */
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;
	int open;

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		/* Let any in-flight async binding settle before we inspect it. */
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	/* Point every PTE at scratch and flush the TLBs. */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);
	atomic_set(&ggtt->vm.open, open);

	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

/* Gen6-7: flush GGTT writes via GFX_FLSH_CNTL under the uncore lock. */
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

/* As gen8_ggtt_invalidate(), but additionally invalidate the GuC's TLB. */
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	/* The invalidation register moved on gen12. */
	if (INTEL_GEN(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

/* Old GMCH platforms delegate the flush to the intel-gtt helper. */
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

/*
 * Gen8+ GGTT PTEs only need the present bit; @level and @flags are
 * intentionally ignored here.
 */
static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
				enum i915_cache_level level,
				u32 flags)
{
	return addr | _PAGE_PRESENT;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

/* Write a single gen8 GGTT PTE at @offset and invalidate the TLBs. */
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));

	ggtt->invalidate(ggtt);
}

/*
 * Write gen8 GGTT PTEs for every page of @vma, padding any slack at the
 * end of the node with the scratch page, then invalidate the TLBs.
 */
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

/* Write a single gen6 GGTT PTE at @offset and invalidate the TLBs. */
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

/* Used when PTEs need not be scrubbed on unbind (no isolation required). */
static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

/* Reset [start, start + length) of the gen8 GGTT to the scratch page. */
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	/* Clamp rather than scribble past the end of the GTT. */
	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

/* Argument bundle passed through stop_machine() to the insert-page cb. */
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

/* stop_machine() callback: runs with all other CPUs quiesced. */
static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

/*
 * BXT + VT-d workaround: perform the PTE write inside stop_machine() so
 * no concurrent aperture access can race the GAM fifo drain.
 */
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

/* Argument bundle passed through stop_machine() to the insert-entries cb. */
struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};
/* stop_machine() callback: runs with all other CPUs quiesced. */
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

/*
 * BXT + VT-d workaround: perform the PTE writes inside stop_machine() so
 * no concurrent aperture access can race the GAM fifo drain.
 */
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

/* Reset [start, start + length) of the gen6 GGTT to the scratch page. */
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	/* Clamp rather than scribble past the end of the GTT. */
	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

/* GMCH (intel-gtt) backend: single-page insert via the AGP helper. */
static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

/* GMCH (intel-gtt) backend: insert a whole sg-table via the AGP helper. */
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

/* GMCH (intel-gtt) backend: clear a range via the AGP helper. */
static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

/*
 * Default GGTT vma_ops.bind_vma: write the PTEs for @vma unless it is
 * already bound with the requested flags. @stash is unused here; the GGTT
 * needs no page-table allocation.
 */
static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

/* Default GGTT vma_ops.unbind_vma: scrub the vma's PTE range. */
static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

/*
 * Reserve the GGTT range above GUC_GGTT_TOP, which the GuC cannot access,
 * so that normal allocations never land there. No-op without GuC.
 */
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

/* Release the GUC_GGTT_TOP reservation made by ggtt_reserve_guc_top(). */
static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

/* Undo the reservations made by init_ggtt() on its error paths. */
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	/* Carve out the ranges claimed by the host when running under GVT. */
	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/* Reserve a mappable slot for our lockless error capture */
		ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
						  &ggtt->error_capture,
						  PAGE_SIZE, 0,
						  I915_COLOR_UNEVICTABLE,
						  0, ggtt->mappable_end,
						  DRM_MM_INSERT_LOW);
		if (ret)
			return ret;
	}

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg_kms(&ggtt->vm.i915->drm,
			    "clearing unused GTT space: [%lx, %lx]\n",
			    hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

/*
 * Aliasing-PPGTT variant of ggtt_bind_vma: mirror local binds into the
 * alias ppgtt (consuming @stash for its page tables) and global binds
 * into the GGTT itself.
 */
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

/* Aliasing-PPGTT variant of ggtt_unbind_vma: tear down whichever binds exist. */
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

/*
 * Create the aliasing ppgtt that shadows the whole GGTT, pre-allocating
 * its page tables up front, and switch the GGTT vma_ops over to the
 * aliasing variants. Returns 0 or a negative error code.
 */
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* The alias must be able to cover the entire GGTT. */
	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	/* Any stash pages not consumed by allocate_va_range are returned. */
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

/* Drop the aliasing ppgtt (if any) and restore the plain GGTT vma_ops. */
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

/*
 * i915_init_ggtt - second-stage GGTT init: carve out reserved ranges and,
 * on aliasing-ppgtt platforms, build the alias ppgtt.
 *
 * NOTE(review): when init_aliasing_ppgtt() fails we tear the GGTT back
 * down via cleanup_init_ggtt() yet still return 0 -- the caller proceeds
 * as if initialization succeeded.  Looks suspicious; confirm whether the
 * error should be propagated instead.
 */
int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}

/*
 * Unbind everything from the GGTT, release the reserved nodes and hand
 * the hardware teardown off to the vm's cleanup hook.
 */
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	/* Mark the vm closed so no new opens race the teardown. */
	atomic_set(&ggtt->vm.open, 0);

	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/* SNB: GGMS field gives the GGTT size directly in MiB units. */
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

/* BDW+: GGMS encodes the GGTT size as a power of two, in MiB. */
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int
chv_get_total_gtt_size(u16 gmch_ctrl)
{
	/* CHV: same GGMS field as SNB, but power-of-two encoded (MiB). */
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

/*
 * Map the GTT page-table half of BAR0 (the "GSM") and set up the scratch
 * page that unused PTEs point at.  Common to all gen6+ probe paths.
 * Returns 0 or a negative errno.
 */
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = i915->drm.pdev;
	phys_addr_t phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	/* Pre-encode the scratch PTE once; it never changes afterwards. */
	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, 0);

	return 0;
}

/*
 * vma_ops.set_pages hook for GGTT vmas: build the (possibly remapped)
 * page list for the vma and inherit the object's page sizes.
 */
int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

/* vm.cleanup hook shared by the gen6 and gen8 probe paths. */
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

/* Describe a PCI BAR as a struct resource (start + length). */
static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

/*
 * Probe the gen8+ GGTT: read its size from GMCH_CTRL, install the gen8
 * PTE/insert/clear hooks (with VT-d workarounds where needed) and set up
 * the private PAT before the common GSM mapping.
 */
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!IS_DGFX(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	/* 'size' is bytes of PTEs; each PTE maps one GTT page. */
	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(i915) ||
	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

/* SNB PTE: LLC-cached unless explicitly uncached. */
static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

/* IVB PTE: like SNB but with a distinct L3+LLC cache encoding. */
static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

/* VLV (Baytrail) PTE: adds a writeable bit and CPU-snoop control. */
static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

/* HSW PTE: wider address encoding; any cached level maps to WB LLC. */
static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

/* HSW with eDRAM ("Iris") PTE: adds a write-through eLLC mode. */
static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

/*
 * Probe the gen6/gen7 GGTT: validate the aperture size, read the GGTT
 * size from GMCH_CTRL and install the per-platform PTE encoder.
 */
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	/* 'size' is bytes of PTEs; each PTE maps one GTT page. */
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	/* Pick the PTE encoder matching this platform's cache bits. */
	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	return ggtt_probe_common(ggtt, size);
}

/* vm.cleanup hook for the legacy (gen5-) intel-gtt backed GGTT. */
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

/*
 * Probe the legacy (gen5 and earlier) GGTT via the shared intel-gtt
 * (GMCH) driver rather than mapping the page tables ourselves.
 */
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	/* intel_gmch_probe() returns nonzero on success. */
	ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->do_idle_maps = needs_idle_maps(i915);
	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma =
		ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

/*
 * Dispatch to the generation-specific GGTT probe, then clamp the result:
 * the GGTT must fit in 32 bits and the mappable aperture must not extend
 * past the end of the GGTT.
 */
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = &i915->drm.pdev->dev;

	if (INTEL_GEN(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
	if (ret)
		return ret;

	if (intel_vtd_active())
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

/* Enable the GTT on pre-gen6 hardware; a no-op on gen6+. */
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

/* Switch GGTT invalidation over to the GuC-aware variant. */
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/* Restore the plain gen8 invalidation after GuC is disabled. */
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/*
 * Restore the GGTT after suspend: scrub every PTE, then rewrite the
 * bindings of every vma that was bound before the power transition.
 */
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm,
			     0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	/* Dirty CPU caches must be flushed before the GPU reads the pages. */
	if (flush)
		wbinvd_on_all_cpus();

	if (INTEL_GEN(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}

/*
 * Emit one 90-degree-rotated plane into @st: walk the source columns
 * bottom-up so the output scatterlist reads the tiles in rotated order.
 * Returns the next free scatterlist entry.
 */
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		/* Start at the bottom of this column and walk upwards. */
		src_idx = stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}

/*
 * Build the scatterlist for a rotated GGTT view of @obj, one plane at a
 * time.  Returns the new sg_table or an ERR_PTR on allocation failure.
 */
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

/*
 * Emit one remapped (linear-view) plane into @st, coalescing physically
 * contiguous runs into single scatterlist entries.  Returns the next free
 * scatterlist entry.
 */
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	    unsigned int width, unsigned int height,
	    unsigned int stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			/* Never take more than remains of this row. */
			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		/* Skip the pages between the end of this row and the next. */
		offset += stride - width;
	}

	return sg;
}

/*
 * Build the scatterlist for a remapped GGTT view of @obj, one plane at a
 * time, trimming unused entries at the end.  Returns the new sg_table or
 * an ERR_PTR on allocation failure.
 */
static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list.
*/ 13362c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL); 13372c86e55dSMatthew Auld if (!st) 13382c86e55dSMatthew Auld goto err_st_alloc; 13392c86e55dSMatthew Auld 13402c86e55dSMatthew Auld ret = sg_alloc_table(st, size, GFP_KERNEL); 13412c86e55dSMatthew Auld if (ret) 13422c86e55dSMatthew Auld goto err_sg_alloc; 13432c86e55dSMatthew Auld 13442c86e55dSMatthew Auld st->nents = 0; 13452c86e55dSMatthew Auld sg = st->sgl; 13462c86e55dSMatthew Auld 13472c86e55dSMatthew Auld for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { 13482c86e55dSMatthew Auld sg = remap_pages(obj, rem_info->plane[i].offset, 13492c86e55dSMatthew Auld rem_info->plane[i].width, rem_info->plane[i].height, 13502c86e55dSMatthew Auld rem_info->plane[i].stride, st, sg); 13512c86e55dSMatthew Auld } 13522c86e55dSMatthew Auld 13532c86e55dSMatthew Auld i915_sg_trim(st); 13542c86e55dSMatthew Auld 13552c86e55dSMatthew Auld return st; 13562c86e55dSMatthew Auld 13572c86e55dSMatthew Auld err_sg_alloc: 13582c86e55dSMatthew Auld kfree(st); 13592c86e55dSMatthew Auld err_st_alloc: 13602c86e55dSMatthew Auld 136152ce7074SWambui Karuga drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", 136252ce7074SWambui Karuga obj->base.size, rem_info->plane[0].width, 136352ce7074SWambui Karuga rem_info->plane[0].height, size); 13642c86e55dSMatthew Auld 13652c86e55dSMatthew Auld return ERR_PTR(ret); 13662c86e55dSMatthew Auld } 13672c86e55dSMatthew Auld 13682c86e55dSMatthew Auld static noinline struct sg_table * 13692c86e55dSMatthew Auld intel_partial_pages(const struct i915_ggtt_view *view, 13702c86e55dSMatthew Auld struct drm_i915_gem_object *obj) 13712c86e55dSMatthew Auld { 13722c86e55dSMatthew Auld struct sg_table *st; 13732c86e55dSMatthew Auld struct scatterlist *sg, *iter; 13742c86e55dSMatthew Auld unsigned int count = view->partial.size; 13752c86e55dSMatthew Auld unsigned int offset; 13762c86e55dSMatthew Auld int ret = -ENOMEM; 13772c86e55dSMatthew Auld 13782c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL); 13792c86e55dSMatthew Auld if (!st) 13802c86e55dSMatthew Auld goto err_st_alloc; 13812c86e55dSMatthew Auld 13822c86e55dSMatthew Auld ret = sg_alloc_table(st, count, GFP_KERNEL); 13832c86e55dSMatthew Auld if (ret) 13842c86e55dSMatthew Auld goto err_sg_alloc; 13852c86e55dSMatthew Auld 1386*934941edSTvrtko Ursulin iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); 13872c86e55dSMatthew Auld GEM_BUG_ON(!iter); 13882c86e55dSMatthew Auld 13892c86e55dSMatthew Auld sg = st->sgl; 13902c86e55dSMatthew Auld st->nents = 0; 13912c86e55dSMatthew Auld do { 13922c86e55dSMatthew Auld unsigned int len; 13932c86e55dSMatthew Auld 1394*934941edSTvrtko Ursulin len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), 13952c86e55dSMatthew Auld count << PAGE_SHIFT); 13962c86e55dSMatthew Auld sg_set_page(sg, NULL, len, 0); 13972c86e55dSMatthew Auld sg_dma_address(sg) = 13982c86e55dSMatthew Auld sg_dma_address(iter) + (offset << PAGE_SHIFT); 13992c86e55dSMatthew Auld sg_dma_len(sg) = len; 14002c86e55dSMatthew Auld 14012c86e55dSMatthew Auld st->nents++; 14022c86e55dSMatthew Auld count -= len >> PAGE_SHIFT; 14032c86e55dSMatthew 
Auld if (count == 0) { 14042c86e55dSMatthew Auld sg_mark_end(sg); 14052c86e55dSMatthew Auld i915_sg_trim(st); /* Drop any unused tail entries. */ 14062c86e55dSMatthew Auld 14072c86e55dSMatthew Auld return st; 14082c86e55dSMatthew Auld } 14092c86e55dSMatthew Auld 14102c86e55dSMatthew Auld sg = __sg_next(sg); 14112c86e55dSMatthew Auld iter = __sg_next(iter); 14122c86e55dSMatthew Auld offset = 0; 14132c86e55dSMatthew Auld } while (1); 14142c86e55dSMatthew Auld 14152c86e55dSMatthew Auld err_sg_alloc: 14162c86e55dSMatthew Auld kfree(st); 14172c86e55dSMatthew Auld err_st_alloc: 14182c86e55dSMatthew Auld return ERR_PTR(ret); 14192c86e55dSMatthew Auld } 14202c86e55dSMatthew Auld 14212c86e55dSMatthew Auld static int 14222c86e55dSMatthew Auld i915_get_ggtt_vma_pages(struct i915_vma *vma) 14232c86e55dSMatthew Auld { 14242c86e55dSMatthew Auld int ret; 14252c86e55dSMatthew Auld 14262c86e55dSMatthew Auld /* 14272c86e55dSMatthew Auld * The vma->pages are only valid within the lifespan of the borrowed 14282c86e55dSMatthew Auld * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so 14292c86e55dSMatthew Auld * must be the vma->pages. A simple rule is that vma->pages must only 14302c86e55dSMatthew Auld * be accessed when the obj->mm.pages are pinned. 
14312c86e55dSMatthew Auld */ 14322c86e55dSMatthew Auld GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); 14332c86e55dSMatthew Auld 14342c86e55dSMatthew Auld switch (vma->ggtt_view.type) { 14352c86e55dSMatthew Auld default: 14362c86e55dSMatthew Auld GEM_BUG_ON(vma->ggtt_view.type); 14372c86e55dSMatthew Auld /* fall through */ 14382c86e55dSMatthew Auld case I915_GGTT_VIEW_NORMAL: 14392c86e55dSMatthew Auld vma->pages = vma->obj->mm.pages; 14402c86e55dSMatthew Auld return 0; 14412c86e55dSMatthew Auld 14422c86e55dSMatthew Auld case I915_GGTT_VIEW_ROTATED: 14432c86e55dSMatthew Auld vma->pages = 14442c86e55dSMatthew Auld intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); 14452c86e55dSMatthew Auld break; 14462c86e55dSMatthew Auld 14472c86e55dSMatthew Auld case I915_GGTT_VIEW_REMAPPED: 14482c86e55dSMatthew Auld vma->pages = 14492c86e55dSMatthew Auld intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); 14502c86e55dSMatthew Auld break; 14512c86e55dSMatthew Auld 14522c86e55dSMatthew Auld case I915_GGTT_VIEW_PARTIAL: 14532c86e55dSMatthew Auld vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); 14542c86e55dSMatthew Auld break; 14552c86e55dSMatthew Auld } 14562c86e55dSMatthew Auld 14572c86e55dSMatthew Auld ret = 0; 14582c86e55dSMatthew Auld if (IS_ERR(vma->pages)) { 14592c86e55dSMatthew Auld ret = PTR_ERR(vma->pages); 14602c86e55dSMatthew Auld vma->pages = NULL; 146152ce7074SWambui Karuga drm_err(&vma->vm->i915->drm, 146252ce7074SWambui Karuga "Failed to get pages for VMA view type %u (%d)!\n", 14632c86e55dSMatthew Auld vma->ggtt_view.type, ret); 14642c86e55dSMatthew Auld } 14652c86e55dSMatthew Auld return ret; 14662c86e55dSMatthew Auld } 1467