// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"

static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
	return IS_BROXTON(i915) && i915_vtd_active(i915);
}

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}

struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/*
	 * To avoid severe over-allocation when dealing with min_page_size
	 * restrictions, we override that behaviour here by allowing an object
	 * size and page layout which can be smaller. In practice this should be
	 * totally fine, since GTT paging structures are not typically inserted
	 * into the GTT.
	 *
	 * Note that we also hit this path for the scratch page, and for this
	 * case it might need to be 64K, but that should work fine here since we
	 * used the passed in size for the page size, which should ensure it
	 * also has the same alignment.
	 */
	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
						    vm->lmem_pt_obj_flags);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

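/*
 * Added commentary (not from the original file): the shared dma-resv set
 * up above is what makes single-lock page-table management possible. A
 * caller can lock every paging structure in the vm through the usual i915
 * ww-retry loop, as a rough sketch:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_vm_lock_objects(vm, &ww);
 *		if (err)
 *			continue;
 *		// every paging structure in this vm is now locked
 *	}
 *
 * This is a sketch of the intended usage, not code copied from a caller;
 * see i915_vm_lock_objects() below.
 */
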
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

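/*
 * Illustrative pairing (added, not from the original file): allocation and
 * CPU mapping of a paging structure normally go together, roughly:
 *
 *	obj = vm->alloc_pt_dma(vm, SZ_4K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	err = map_pt_dma(vm, obj);
 *	if (err) {
 *		i915_gem_object_put(obj);
 *		return err;
 *	}
 *
 * The pin_map is kept for the lifetime of the page table, and the object
 * is made unshrinkable because paging structures must stay resident for
 * as long as the vm can be walked.
 */
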
static void clear_vm_list(struct list_head *list)
{
	struct i915_vma *vma, *vn;

	list_for_each_entry_safe(vma, vn, list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (!i915_gem_object_get_rcu(obj)) {
			/*
			 * Object is dying, but has not yet cleared its
			 * vma list.
			 * Unbind the dying vma to ensure our list
			 * is completely drained. We leave the destruction to
			 * the object destructor to avoid the vma
			 * disappearing under it.
			 */
			atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
			WARN_ON(__i915_vma_unbind(vma));

			/* Remove from the unbound list */
			list_del_init(&vma->vm_link);

			/*
			 * Delay the vm and vm mutex freeing until the
			 * object is done with destruction.
			 */
			i915_vm_resv_get(vma->vm);
			vma->vm_ddestroy = true;
		} else {
			i915_vma_destroy_locked(vma);
			i915_gem_object_put(obj);
		}
	}
}

static void __i915_vm_close(struct i915_address_space *vm)
{
	mutex_lock(&vm->mutex);

	clear_vm_list(&vm->bound_list);
	clear_vm_list(&vm->unbound_list);

	/* Check for must-fix unanticipated side-effects */
	GEM_BUG_ON(!list_empty(&vm->bound_list));
	GEM_BUG_ON(!list_empty(&vm->unbound_list));

	mutex_unlock(&vm->mutex);
}

/* lock the vm into the current ww, if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->_resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/* We borrowed the scratch page from ggtt, take the top level object */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
}

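/*
 * Added note: i915_vm_lock_objects() works because of the resv sharing
 * established in alloc_pt_dma()/alloc_pt_lmem(); locking any one object
 * known to use the vm's own _resv locks all of them. The aliasing-ppgtt
 * case borrows the GGTT scratch page, so there the top-level page
 * directory stands in as the representative object instead.
 */
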
/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock, and also if we raced with the vma
 * destruction path while destroying a vma.
 */
void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	mutex_destroy(&vm->mutex);

	kfree(vm);
}

static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	__i915_vm_close(vm);

	/* Synchronize async unbinds. */
	i915_vma_resource_bind_dep_sync_all(vm);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}

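/*
 * Added commentary: address-space teardown is two-staged. Dropping the
 * last vm reference only queues the real teardown on a workqueue, and the
 * backing memory survives until the last resv_ref (possibly held by a
 * dying vma, see clear_vm_list()) is also gone:
 *
 *	i915_vm_put()
 *	  -> i915_vm_release()        queues release_work
 *	    -> __i915_vm_release()    unbinds vmas, runs vm->cleanup()
 *	      -> i915_vm_resv_put()
 *	        -> i915_vm_resv_release()   dma_resv_fini() + kfree(vm)
 */
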
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for GGTT that has already done an early
	 * kref_init here.
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	vm->pending_unbind = RB_ROOT_CACHED;
	INIT_WORK(&vm->release_work, __i915_vm_release);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * The CHV + BXT VT-d workarounds use stop_machine(),
		 * which is allowed to allocate memory. This means &vm->mutex
		 * is the outer lock, and in theory we can allocate memory inside
		 * it through stop_machine().
		 *
		 * Add the annotation for this; we use trylock in the shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);

	memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
		 ARRAY_SIZE(vm->min_alignment));

	if (HAS_64K_PAGES(vm->i915)) {
		vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
		vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
	}

	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
	INIT_LIST_HEAD(&vm->unbound_list);
}

void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);
}

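/*
 * Illustrative use (added, not from the original file): fill_page_dma()
 * is how a freshly allocated paging structure is typically initialized so
 * that every entry points at the scratch level below it, e.g. roughly:
 *
 *	fill_page_dma(pt->base, vm->scratch[0]->encode,
 *		      PAGE_SIZE / sizeof(u64));
 *
 * where 'encode' is the pre-computed scratch PTE/PDE value for that vm.
 */
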
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
	drm_clflush_virt_range(vaddr, scratch->base.size);
}

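/*
 * Added note: POISON_FREE is 0x6b, so a poisoned scratch page reads back
 * as 0x6b6b6b6b..., which is easy to spot in an error-state dump and is
 * not a sane command sequence should the GPU ever execute from it; in
 * production builds the page is simply cleared instead.
 */
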
int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
	    !HAS_64K_PAGES(vm->i915))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_scratch_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		/* Retry with the smaller, always-supported page size */
		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}

void free_scratch(struct i915_address_space *vm)
{
	int i;

	if (!vm->scratch[0])
		return;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}

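/*
 * Added note: only the terminal scratch page, vm->scratch[0], is created
 * here. The scratch page tables and directories above it
 * (vm->scratch[1]..vm->scratch[vm->top]) are allocated by the
 * platform-specific ppgtt setup, which is why free_scratch() walks all
 * levels up to vm->top.
 */
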
void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec, if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		gt_WARN_ON_ONCE(gt, can_use_gtt_cache &&
				intel_uncore_read(uncore,
						  HSW_GTT_CACHE_EN) == 0);
	}
}

static void xelpmp_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore, XELPMP_PAT_INDEX(0),
			   MTL_PPAT_L4_0_WB);
	intel_uncore_write(uncore, XELPMP_PAT_INDEX(1),
			   MTL_PPAT_L4_1_WT);
	intel_uncore_write(uncore, XELPMP_PAT_INDEX(2),
			   MTL_PPAT_L4_3_UC);
	intel_uncore_write(uncore, XELPMP_PAT_INDEX(3),
			   MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
	intel_uncore_write(uncore, XELPMP_PAT_INDEX(4),
			   MTL_PPAT_L4_0_WB | MTL_3_COH_2W);

	/*
	 * Remaining PAT entries are left at the hardware-default
	 * fully-cached setting.
	 */
}

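/*
 * Added summary of the MTL/Xe_LPM+ PAT programmed above (the standalone
 * media GT carries its own copy of these registers):
 *
 *	index	L4 behaviour	coherency with CPU caches
 *	0	write-back	non-coherent
 *	1	write-through	non-coherent
 *	2	uncached	non-coherent
 *	3	write-back	1-way coherent
 *	4	write-back	2-way coherent
 */
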
static void xelpg_setup_private_ppat(struct intel_gt *gt)
{
	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0),
				     MTL_PPAT_L4_0_WB);
	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1),
				     MTL_PPAT_L4_1_WT);
	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2),
				     MTL_PPAT_L4_3_UC);
	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3),
				     MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
	intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4),
				     MTL_PPAT_L4_0_WB | MTL_3_COH_2W);

	/*
	 * Remaining PAT entries are left at the hardware-default
	 * fully-cached setting.
	 */
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void xehp_setup_private_ppat(struct intel_gt *gt)
{
	enum forcewake_domains fw;
	unsigned long flags;

	fw = intel_uncore_forcewake_for_reg(gt->uncore, _MMIO(XEHP_PAT_INDEX(0).reg),
					    FW_REG_WRITE);
	intel_uncore_forcewake_get(gt->uncore, fw);

	intel_gt_mcr_lock(gt, &flags);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
	intel_gt_mcr_unlock(gt, flags);

	intel_uncore_forcewake_put(gt->uncore, fw);
}

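/*
 * Added note: xehp_setup_private_ppat() takes forcewake and the MCR
 * steering lock once around the whole sequence and then uses the raw
 * _fw multicast writes, instead of paying the forcewake and steering
 * overhead on each of the eight PAT register writes individually.
 */
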
static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

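/*
 * Added note: GEN8_PPAT(i, x) just shifts the 8-bit entry value into byte
 * i of a 64-bit word, roughly ((u64)(x) << ((i) * 8)), which is why
 * bdw_setup_private_ppat() can OR all eight entries into one u64 and then
 * write it out as the GEN8_PRIVATE_PAT_LO/HI register pair.
 */
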
static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	struct drm_i915_private *i915 = gt->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

	if (gt->type == GT_MEDIA) {
		xelpmp_setup_private_ppat(gt->uncore);
		return;
	}

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		xelpg_setup_private_ppat(gt);
	else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		xehp_setup_private_ppat(gt);
	else if (GRAPHICS_VER(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (GRAPHICS_VER(i915) >= 11)
		icl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

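/*
 * Added note: the cascade in setup_private_pat() is ordered from newest
 * to oldest hardware, so each platform is caught by the first check it
 * satisfies; the standalone media GT (MTL and later) is special-cased up
 * front because it has its own PAT registers independent of the graphics
 * IP version.
 */
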
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
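/*
 * Added note: including the selftest source directly gives mock_gtt.c
 * access to the static helpers in this file; this is the usual i915
 * selftest pattern rather than an accidental double build of the code.
 */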