// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	i915_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	stash_init(&i915->mm.wc_stash);

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (INTEL_GEN(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
		gen8_set_pte(gtt_entries++, pte_encode | addr);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0].encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0].encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!USES_GUC(ggtt->vm.i915))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/* Reserve a mappable slot for our lockless error capture */
		ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
						  &ggtt->error_capture,
						  PAGE_SIZE, 0,
						  I915_COLOR_UNEVICTABLE,
						  0, ggtt->mappable_end,
						  DRM_MM_INSERT_LOW);
		if (ret)
			return ret;
	}

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg_kms(&ggtt->vm.i915->drm,
			    "clearing unused GTT space: [%lx, %lx]\n",
			    hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	u32 pte_flags;
	int ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;

		if (flags & I915_VMA_ALLOC) {
			ret = alias->vm.allocate_va_range(&alias->vm,
							  vma->node.start,
							  vma->size);
			if (ret)
				return ret;

			set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
		}

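		/* By now the PPGTT range for this vma must have been allocated. */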
		GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
				     __i915_vma_flags(vma)));
		alias->vm.insert_entries(&alias->vm, vma,
					 cache_level, pte_flags);
	}

	if (flags & I915_VMA_GLOBAL_BIND)
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
		struct i915_address_space *vm = vma->vm;

		vm->clear_range(vm, vma->node.start, vma->size);
	}

	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		struct i915_address_space *vm =
			&i915_vm_to_ggtt(vma->vm)->alias->vm;

		vm->clear_range(vm, vma->node.start, vma->size);
	}
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	return 0;

err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}

static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	atomic_set(&ggtt->vm.open, 0);

	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

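	/* Release the WC MTRR and the aperture io mapping set up in ggtt_init_hw(). */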
	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct pagevec *pvec;

	fini_aliasing_ppgtt(&i915->ggtt);

	ggtt_cleanup_hw(&i915->ggtt);

	pvec = &i915->mm.wc_stash.pvec;
	if (pvec->nr) {
		set_pages_array_wb(pvec->pages, pvec->nr);
		__pagevec_release(pvec);
	}
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = i915->drm.pdev;
	phys_addr_t phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	ggtt->vm.scratch[0].encode =
		ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, 0);

	return 0;
}

int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	cleanup_scratch_page(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;
	int err;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!IS_DGFX(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (err)
		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(i915) ||
	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	ggtt->vm.pte_encode = gen8_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;
	int err;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
		return -ENXIO;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err)
		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

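	/* Gen2-5 GGTT updates are routed through the intel-gtt (GMCH) helpers. */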
	ggtt->do_idle_maps = needs_idle_maps(i915);
	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	if (unlikely(ggtt->do_idle_maps))
		dev_notice(i915->drm.dev,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = &i915->drm.pdev->dev;

	if (INTEL_GEN(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	if ((ggtt->vm.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  " of address space! Found %lldM!\n",
			  ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%pa, total=%llx\n",
			  &ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
	if (ret)
		return ret;

	if (intel_vtd_active())
		dev_info(i915->drm.dev, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
			continue;

		clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
		WARN_ON(i915_vma_bind(vma,
				      obj ? obj->cache_level : 0,
				      PIN_GLOBAL, NULL));
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (INTEL_GEN(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
12262c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL);
12272c86e55dSMatthew Auld if (!st)
12282c86e55dSMatthew Auld goto err_st_alloc;
12292c86e55dSMatthew Auld
12302c86e55dSMatthew Auld ret = sg_alloc_table(st, size, GFP_KERNEL);
12312c86e55dSMatthew Auld if (ret)
12322c86e55dSMatthew Auld goto err_sg_alloc;
12332c86e55dSMatthew Auld
12342c86e55dSMatthew Auld st->nents = 0;
12352c86e55dSMatthew Auld sg = st->sgl;
12362c86e55dSMatthew Auld
12372c86e55dSMatthew Auld for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
12382c86e55dSMatthew Auld sg = rotate_pages(obj, rot_info->plane[i].offset,
12392c86e55dSMatthew Auld rot_info->plane[i].width, rot_info->plane[i].height,
12402c86e55dSMatthew Auld rot_info->plane[i].stride, st, sg);
12412c86e55dSMatthew Auld }
12422c86e55dSMatthew Auld
12432c86e55dSMatthew Auld return st;
12442c86e55dSMatthew Auld
12452c86e55dSMatthew Auld err_sg_alloc:
12462c86e55dSMatthew Auld kfree(st);
12472c86e55dSMatthew Auld err_st_alloc:
12482c86e55dSMatthew Auld
124952ce7074SWambui Karuga drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
125052ce7074SWambui Karuga obj->base.size, rot_info->plane[0].width,
125152ce7074SWambui Karuga rot_info->plane[0].height, size);
12522c86e55dSMatthew Auld
12532c86e55dSMatthew Auld return ERR_PTR(ret);
12542c86e55dSMatthew Auld }
12552c86e55dSMatthew Auld
12562c86e55dSMatthew Auld static struct scatterlist *
12572c86e55dSMatthew Auld remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
12582c86e55dSMatthew Auld unsigned int width, unsigned int height,
12592c86e55dSMatthew Auld unsigned int stride,
12602c86e55dSMatthew Auld struct sg_table *st, struct scatterlist *sg)
12612c86e55dSMatthew Auld {
12622c86e55dSMatthew Auld unsigned int row;
12632c86e55dSMatthew Auld
12642c86e55dSMatthew Auld for (row = 0; row < height; row++) {
12652c86e55dSMatthew Auld unsigned int left = width * I915_GTT_PAGE_SIZE;
12662c86e55dSMatthew Auld
12672c86e55dSMatthew Auld while (left) {
12682c86e55dSMatthew Auld dma_addr_t addr;
12692c86e55dSMatthew Auld unsigned int length;
12702c86e55dSMatthew Auld
12712c86e55dSMatthew Auld /*
12722c86e55dSMatthew Auld  * We don't need the pages, but need to initialize
12732c86e55dSMatthew Auld  * the entries so the sg list can be happily traversed.
12742c86e55dSMatthew Auld  * The only things we need are the DMA addresses.
12752c86e55dSMatthew Auld  */
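/*
 * Descriptive note: unlike rotate_pages(), which emits one page per sg
 * entry, a remapped row is built from runs of contiguous pages.
 * i915_gem_object_get_dma_address_len() returns the DMA address of the
 * page at 'offset' together with the length of the contiguous run, which
 * is then clamped to the bytes remaining in this row so that no sg entry
 * spans a row boundary.
 */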
12762c86e55dSMatthew Auld
12772c86e55dSMatthew Auld addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
12782c86e55dSMatthew Auld
12792c86e55dSMatthew Auld length = min(left, length);
12802c86e55dSMatthew Auld
12812c86e55dSMatthew Auld st->nents++;
12822c86e55dSMatthew Auld
12832c86e55dSMatthew Auld sg_set_page(sg, NULL, length, 0);
12842c86e55dSMatthew Auld sg_dma_address(sg) = addr;
12852c86e55dSMatthew Auld sg_dma_len(sg) = length;
12862c86e55dSMatthew Auld sg = sg_next(sg);
12872c86e55dSMatthew Auld
12882c86e55dSMatthew Auld offset += length / I915_GTT_PAGE_SIZE;
12892c86e55dSMatthew Auld left -= length;
12902c86e55dSMatthew Auld }
12912c86e55dSMatthew Auld
12922c86e55dSMatthew Auld offset += stride - width;
12932c86e55dSMatthew Auld }
12942c86e55dSMatthew Auld
12952c86e55dSMatthew Auld return sg;
12962c86e55dSMatthew Auld }
12972c86e55dSMatthew Auld
12982c86e55dSMatthew Auld static noinline struct sg_table *
12992c86e55dSMatthew Auld intel_remap_pages(struct intel_remapped_info *rem_info,
13002c86e55dSMatthew Auld struct drm_i915_gem_object *obj)
13012c86e55dSMatthew Auld {
13022c86e55dSMatthew Auld unsigned int size = intel_remapped_info_size(rem_info);
130352ce7074SWambui Karuga struct drm_i915_private *i915 = to_i915(obj->base.dev);
13042c86e55dSMatthew Auld struct sg_table *st;
13052c86e55dSMatthew Auld struct scatterlist *sg;
13062c86e55dSMatthew Auld int ret = -ENOMEM;
13072c86e55dSMatthew Auld int i;
13082c86e55dSMatthew Auld
13092c86e55dSMatthew Auld /* Allocate target SG list. */
13102c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL);
13112c86e55dSMatthew Auld if (!st)
13122c86e55dSMatthew Auld goto err_st_alloc;
13132c86e55dSMatthew Auld
13142c86e55dSMatthew Auld ret = sg_alloc_table(st, size, GFP_KERNEL);
13152c86e55dSMatthew Auld if (ret)
13162c86e55dSMatthew Auld goto err_sg_alloc;
13172c86e55dSMatthew Auld
13182c86e55dSMatthew Auld st->nents = 0;
13192c86e55dSMatthew Auld sg = st->sgl;
13202c86e55dSMatthew Auld
13212c86e55dSMatthew Auld for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
13222c86e55dSMatthew Auld sg = remap_pages(obj, rem_info->plane[i].offset,
13232c86e55dSMatthew Auld rem_info->plane[i].width, rem_info->plane[i].height,
13242c86e55dSMatthew Auld rem_info->plane[i].stride, st, sg);
13252c86e55dSMatthew Auld }
13262c86e55dSMatthew Auld
13272c86e55dSMatthew Auld i915_sg_trim(st);
13282c86e55dSMatthew Auld
13292c86e55dSMatthew Auld return st;
13302c86e55dSMatthew Auld
13312c86e55dSMatthew Auld err_sg_alloc:
13322c86e55dSMatthew Auld kfree(st);
13332c86e55dSMatthew Auld err_st_alloc:
13342c86e55dSMatthew Auld
133552ce7074SWambui Karuga drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
133652ce7074SWambui Karuga obj->base.size, rem_info->plane[0].width,
133752ce7074SWambui Karuga rem_info->plane[0].height, size);
13382c86e55dSMatthew Auld
13392c86e55dSMatthew Auld return ERR_PTR(ret);
13402c86e55dSMatthew Auld }
13412c86e55dSMatthew Auld
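/*
 * Descriptive note: a partial view covers only view->partial.size pages of
 * the object, starting at page view->partial.offset. The DMA addresses are
 * copied straight out of the object's own sg_table, splitting the first
 * and last chunks as needed, so entries are copied chunk by chunk rather
 * than page by page.
 */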
13422c86e55dSMatthew Auld static noinline struct sg_table *
13432c86e55dSMatthew Auld intel_partial_pages(const struct i915_ggtt_view *view,
13442c86e55dSMatthew Auld struct drm_i915_gem_object *obj)
13452c86e55dSMatthew Auld {
13462c86e55dSMatthew Auld struct sg_table *st;
13472c86e55dSMatthew Auld struct scatterlist *sg, *iter;
13482c86e55dSMatthew Auld unsigned int count = view->partial.size;
13492c86e55dSMatthew Auld unsigned int offset;
13502c86e55dSMatthew Auld int ret = -ENOMEM;
13512c86e55dSMatthew Auld
13522c86e55dSMatthew Auld st = kmalloc(sizeof(*st), GFP_KERNEL);
13532c86e55dSMatthew Auld if (!st)
13542c86e55dSMatthew Auld goto err_st_alloc;
13552c86e55dSMatthew Auld
13562c86e55dSMatthew Auld ret = sg_alloc_table(st, count, GFP_KERNEL);
13572c86e55dSMatthew Auld if (ret)
13582c86e55dSMatthew Auld goto err_sg_alloc;
13592c86e55dSMatthew Auld
13602c86e55dSMatthew Auld iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
13612c86e55dSMatthew Auld GEM_BUG_ON(!iter);
13622c86e55dSMatthew Auld
13632c86e55dSMatthew Auld sg = st->sgl;
13642c86e55dSMatthew Auld st->nents = 0;
13652c86e55dSMatthew Auld do {
13662c86e55dSMatthew Auld unsigned int len;
13672c86e55dSMatthew Auld
13682c86e55dSMatthew Auld len = min(iter->length - (offset << PAGE_SHIFT),
13692c86e55dSMatthew Auld count << PAGE_SHIFT);
13702c86e55dSMatthew Auld sg_set_page(sg, NULL, len, 0);
13712c86e55dSMatthew Auld sg_dma_address(sg) =
13722c86e55dSMatthew Auld sg_dma_address(iter) + (offset << PAGE_SHIFT);
13732c86e55dSMatthew Auld sg_dma_len(sg) = len;
13742c86e55dSMatthew Auld
13752c86e55dSMatthew Auld st->nents++;
13762c86e55dSMatthew Auld count -= len >> PAGE_SHIFT;
13772c86e55dSMatthew Auld if (count == 0) {
13782c86e55dSMatthew Auld sg_mark_end(sg);
13792c86e55dSMatthew Auld i915_sg_trim(st); /* Drop any unused tail entries. */
13802c86e55dSMatthew Auld
13812c86e55dSMatthew Auld return st;
13822c86e55dSMatthew Auld }
13832c86e55dSMatthew Auld
13842c86e55dSMatthew Auld sg = __sg_next(sg);
13852c86e55dSMatthew Auld iter = __sg_next(iter);
13862c86e55dSMatthew Auld offset = 0;
13872c86e55dSMatthew Auld } while (1);
13882c86e55dSMatthew Auld
13892c86e55dSMatthew Auld err_sg_alloc:
13902c86e55dSMatthew Auld kfree(st);
13912c86e55dSMatthew Auld err_st_alloc:
13922c86e55dSMatthew Auld return ERR_PTR(ret);
13932c86e55dSMatthew Auld }
13942c86e55dSMatthew Auld
13952c86e55dSMatthew Auld static int
13962c86e55dSMatthew Auld i915_get_ggtt_vma_pages(struct i915_vma *vma)
13972c86e55dSMatthew Auld {
13982c86e55dSMatthew Auld int ret;
13992c86e55dSMatthew Auld
14002c86e55dSMatthew Auld /*
14012c86e55dSMatthew Auld  * The vma->pages are only valid within the lifespan of the borrowed
14022c86e55dSMatthew Auld  * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
14032c86e55dSMatthew Auld  * must be the vma->pages. A simple rule is that vma->pages must only
14042c86e55dSMatthew Auld  * be accessed when the obj->mm.pages are pinned.
14052c86e55dSMatthew Auld  */
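/*
 * Descriptive note: the switch below dispatches on the view type. A NORMAL
 * view borrows obj->mm.pages directly, while ROTATED, REMAPPED and PARTIAL
 * views build a fresh sg_table of DMA addresses via the helpers above; any
 * ERR_PTR they return is converted to an errno and logged here.
 */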
14062c86e55dSMatthew Auld GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
14072c86e55dSMatthew Auld
14082c86e55dSMatthew Auld switch (vma->ggtt_view.type) {
14092c86e55dSMatthew Auld default:
14102c86e55dSMatthew Auld GEM_BUG_ON(vma->ggtt_view.type);
14112c86e55dSMatthew Auld /* fall through */
14122c86e55dSMatthew Auld case I915_GGTT_VIEW_NORMAL:
14132c86e55dSMatthew Auld vma->pages = vma->obj->mm.pages;
14142c86e55dSMatthew Auld return 0;
14152c86e55dSMatthew Auld
14162c86e55dSMatthew Auld case I915_GGTT_VIEW_ROTATED:
14172c86e55dSMatthew Auld vma->pages =
14182c86e55dSMatthew Auld intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
14192c86e55dSMatthew Auld break;
14202c86e55dSMatthew Auld
14212c86e55dSMatthew Auld case I915_GGTT_VIEW_REMAPPED:
14222c86e55dSMatthew Auld vma->pages =
14232c86e55dSMatthew Auld intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
14242c86e55dSMatthew Auld break;
14252c86e55dSMatthew Auld
14262c86e55dSMatthew Auld case I915_GGTT_VIEW_PARTIAL:
14272c86e55dSMatthew Auld vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
14282c86e55dSMatthew Auld break;
14292c86e55dSMatthew Auld }
14302c86e55dSMatthew Auld
14312c86e55dSMatthew Auld ret = 0;
14322c86e55dSMatthew Auld if (IS_ERR(vma->pages)) {
14332c86e55dSMatthew Auld ret = PTR_ERR(vma->pages);
14342c86e55dSMatthew Auld vma->pages = NULL;
143552ce7074SWambui Karuga drm_err(&vma->vm->i915->drm,
143652ce7074SWambui Karuga "Failed to get pages for VMA view type %u (%d)!\n",
14372c86e55dSMatthew Auld vma->ggtt_view.type, ret);
14382c86e55dSMatthew Auld }
14392c86e55dSMatthew Auld return ret;
14402c86e55dSMatthew Auld }
1441