1b42fe9caSJoonas Lahtinen /* 2b42fe9caSJoonas Lahtinen * Copyright © 2016 Intel Corporation 3b42fe9caSJoonas Lahtinen * 4b42fe9caSJoonas Lahtinen * Permission is hereby granted, free of charge, to any person obtaining a 5b42fe9caSJoonas Lahtinen * copy of this software and associated documentation files (the "Software"), 6b42fe9caSJoonas Lahtinen * to deal in the Software without restriction, including without limitation 7b42fe9caSJoonas Lahtinen * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8b42fe9caSJoonas Lahtinen * and/or sell copies of the Software, and to permit persons to whom the 9b42fe9caSJoonas Lahtinen * Software is furnished to do so, subject to the following conditions: 10b42fe9caSJoonas Lahtinen * 11b42fe9caSJoonas Lahtinen * The above copyright notice and this permission notice (including the next 12b42fe9caSJoonas Lahtinen * paragraph) shall be included in all copies or substantial portions of the 13b42fe9caSJoonas Lahtinen * Software. 14b42fe9caSJoonas Lahtinen * 15b42fe9caSJoonas Lahtinen * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16b42fe9caSJoonas Lahtinen * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17b42fe9caSJoonas Lahtinen * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18b42fe9caSJoonas Lahtinen * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19b42fe9caSJoonas Lahtinen * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20b42fe9caSJoonas Lahtinen * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21b42fe9caSJoonas Lahtinen * IN THE SOFTWARE. 22b42fe9caSJoonas Lahtinen * 23b42fe9caSJoonas Lahtinen */ 24b42fe9caSJoonas Lahtinen 2509480072SChris Wilson #include <linux/sched/mm.h> 26df0566a6SJani Nikula #include <drm/drm_gem.h> 27112ed2d3SChris Wilson 28df0566a6SJani Nikula #include "display/intel_frontbuffer.h" 29df0566a6SJani Nikula 30df0566a6SJani Nikula #include "gt/intel_engine.h" 31ccd20945SChris Wilson #include "gt/intel_engine_heartbeat.h" 32a1c8a09eSTvrtko Ursulin #include "gt/intel_gt.h" 33ccd20945SChris Wilson #include "gt/intel_gt_requests.h" 34b42fe9caSJoonas Lahtinen 35b42fe9caSJoonas Lahtinen #include "i915_drv.h" 36103b76eeSChris Wilson #include "i915_globals.h" 372850748eSChris Wilson #include "i915_sw_fence_work.h" 38a09d9a80SJani Nikula #include "i915_trace.h" 39df0566a6SJani Nikula #include "i915_vma.h" 40b42fe9caSJoonas Lahtinen 4113f1bfd3SChris Wilson static struct i915_global_vma { 42103b76eeSChris Wilson struct i915_global base; 4313f1bfd3SChris Wilson struct kmem_cache *slab_vmas; 4413f1bfd3SChris Wilson } global; 4513f1bfd3SChris Wilson 4613f1bfd3SChris Wilson struct i915_vma *i915_vma_alloc(void) 4713f1bfd3SChris Wilson { 4813f1bfd3SChris Wilson return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL); 4913f1bfd3SChris Wilson } 5013f1bfd3SChris Wilson 5113f1bfd3SChris Wilson void i915_vma_free(struct i915_vma *vma) 5213f1bfd3SChris Wilson { 5313f1bfd3SChris Wilson return kmem_cache_free(global.slab_vmas, vma); 5413f1bfd3SChris Wilson } 5513f1bfd3SChris Wilson 561eca65d9SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) 5710195b1eSChris Wilson 5810195b1eSChris Wilson #include <linux/stackdepot.h> 5910195b1eSChris Wilson 6010195b1eSChris Wilson static void vma_print_allocator(struct i915_vma *vma, const char *reason) 6110195b1eSChris Wilson { 62487f3c7fSThomas Gleixner unsigned long *entries; 63487f3c7fSThomas 
Gleixner unsigned int nr_entries; 6410195b1eSChris Wilson char buf[512]; 6510195b1eSChris Wilson 6610195b1eSChris Wilson if (!vma->node.stack) { 6710195b1eSChris Wilson DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", 6810195b1eSChris Wilson vma->node.start, vma->node.size, reason); 6910195b1eSChris Wilson return; 7010195b1eSChris Wilson } 7110195b1eSChris Wilson 72487f3c7fSThomas Gleixner nr_entries = stack_depot_fetch(vma->node.stack, &entries); 73487f3c7fSThomas Gleixner stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); 7410195b1eSChris Wilson DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", 7510195b1eSChris Wilson vma->node.start, vma->node.size, reason, buf); 7610195b1eSChris Wilson } 7710195b1eSChris Wilson 7810195b1eSChris Wilson #else 7910195b1eSChris Wilson 8010195b1eSChris Wilson static void vma_print_allocator(struct i915_vma *vma, const char *reason) 8110195b1eSChris Wilson { 8210195b1eSChris Wilson } 8310195b1eSChris Wilson 8410195b1eSChris Wilson #endif 8510195b1eSChris Wilson 8612c255b5SChris Wilson static inline struct i915_vma *active_to_vma(struct i915_active *ref) 8712c255b5SChris Wilson { 8812c255b5SChris Wilson return container_of(ref, typeof(struct i915_vma), active); 8912c255b5SChris Wilson } 9012c255b5SChris Wilson 9112c255b5SChris Wilson static int __i915_vma_active(struct i915_active *ref) 9212c255b5SChris Wilson { 932833ddccSChris Wilson return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; 9412c255b5SChris Wilson } 9512c255b5SChris Wilson 96274cbf20SChris Wilson __i915_active_call 9764d6c500SChris Wilson static void __i915_vma_retire(struct i915_active *ref) 9864d6c500SChris Wilson { 9912c255b5SChris Wilson i915_vma_put(active_to_vma(ref)); 100b42fe9caSJoonas Lahtinen } 101b42fe9caSJoonas Lahtinen 102b42fe9caSJoonas Lahtinen static struct i915_vma * 103a01cb37aSChris Wilson vma_create(struct drm_i915_gem_object *obj, 104b42fe9caSJoonas Lahtinen struct i915_address_space *vm, 105b42fe9caSJoonas Lahtinen const struct i915_ggtt_view *view) 106b42fe9caSJoonas Lahtinen { 107b42fe9caSJoonas Lahtinen struct i915_vma *vma; 108b42fe9caSJoonas Lahtinen struct rb_node *rb, **p; 109b42fe9caSJoonas Lahtinen 110e1cc3db0SChris Wilson /* The aliasing_ppgtt should never be used directly! 
*/ 11171e51ca8SChris Wilson GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); 112e1cc3db0SChris Wilson 11313f1bfd3SChris Wilson vma = i915_vma_alloc(); 114b42fe9caSJoonas Lahtinen if (vma == NULL) 115b42fe9caSJoonas Lahtinen return ERR_PTR(-ENOMEM); 116b42fe9caSJoonas Lahtinen 11776f9764cSChris Wilson kref_init(&vma->ref); 1182850748eSChris Wilson mutex_init(&vma->pages_mutex); 1192850748eSChris Wilson vma->vm = i915_vm_get(vm); 12093f2cde2SChris Wilson vma->ops = &vm->vma_ops; 121b42fe9caSJoonas Lahtinen vma->obj = obj; 122ef78f7b1SChris Wilson vma->resv = obj->base.resv; 123b42fe9caSJoonas Lahtinen vma->size = obj->base.size; 124f51455d4SChris Wilson vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 125b42fe9caSJoonas Lahtinen 126b1e3177bSChris Wilson i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire); 127155ab883SChris Wilson 12809480072SChris Wilson /* Declare ourselves safe for use inside shrinkers */ 12909480072SChris Wilson if (IS_ENABLED(CONFIG_LOCKDEP)) { 13009480072SChris Wilson fs_reclaim_acquire(GFP_KERNEL); 13109480072SChris Wilson might_lock(&vma->active.mutex); 13209480072SChris Wilson fs_reclaim_release(GFP_KERNEL); 13309480072SChris Wilson } 13409480072SChris Wilson 135155ab883SChris Wilson INIT_LIST_HEAD(&vma->closed_link); 136155ab883SChris Wilson 1377c518460SChris Wilson if (view && view->type != I915_GGTT_VIEW_NORMAL) { 138b42fe9caSJoonas Lahtinen vma->ggtt_view = *view; 139b42fe9caSJoonas Lahtinen if (view->type == I915_GGTT_VIEW_PARTIAL) { 14007e19ea4SChris Wilson GEM_BUG_ON(range_overflows_t(u64, 1418bab1193SChris Wilson view->partial.offset, 1428bab1193SChris Wilson view->partial.size, 14307e19ea4SChris Wilson obj->base.size >> PAGE_SHIFT)); 1448bab1193SChris Wilson vma->size = view->partial.size; 145b42fe9caSJoonas Lahtinen vma->size <<= PAGE_SHIFT; 1467e7367d3SChris Wilson GEM_BUG_ON(vma->size > obj->base.size); 147b42fe9caSJoonas Lahtinen } else if (view->type == I915_GGTT_VIEW_ROTATED) { 1488bab1193SChris Wilson vma->size = intel_rotation_info_size(&view->rotated); 149b42fe9caSJoonas Lahtinen vma->size <<= PAGE_SHIFT; 1501a74fc0bSVille Syrjälä } else if (view->type == I915_GGTT_VIEW_REMAPPED) { 1511a74fc0bSVille Syrjälä vma->size = intel_remapped_info_size(&view->remapped); 1521a74fc0bSVille Syrjälä vma->size <<= PAGE_SHIFT; 153b42fe9caSJoonas Lahtinen } 154b42fe9caSJoonas Lahtinen } 155b42fe9caSJoonas Lahtinen 1561fcdaa7eSChris Wilson if (unlikely(vma->size > vm->total)) 1571fcdaa7eSChris Wilson goto err_vma; 1581fcdaa7eSChris Wilson 159b00ddb27SChris Wilson GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); 160b00ddb27SChris Wilson 161cb593e5dSChris Wilson spin_lock(&obj->vma.lock); 162cb593e5dSChris Wilson 163b42fe9caSJoonas Lahtinen if (i915_is_ggtt(vm)) { 1641fcdaa7eSChris Wilson if (unlikely(overflows_type(vma->size, u32))) 165cb593e5dSChris Wilson goto err_unlock; 1661fcdaa7eSChris Wilson 16791d4e0aaSChris Wilson vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, 168944397f0SChris Wilson i915_gem_object_get_tiling(obj), 169944397f0SChris Wilson i915_gem_object_get_stride(obj)); 1701fcdaa7eSChris Wilson if (unlikely(vma->fence_size < vma->size || /* overflow */ 1711fcdaa7eSChris Wilson vma->fence_size > vm->total)) 172cb593e5dSChris Wilson goto err_unlock; 1731fcdaa7eSChris Wilson 174f51455d4SChris Wilson GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); 175944397f0SChris Wilson 17691d4e0aaSChris Wilson vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, 177944397f0SChris Wilson 
i915_gem_object_get_tiling(obj), 178944397f0SChris Wilson i915_gem_object_get_stride(obj)); 179944397f0SChris Wilson GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); 180944397f0SChris Wilson 1814dd2fbbfSChris Wilson __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); 182528cbd17SChris Wilson } 183528cbd17SChris Wilson 184528cbd17SChris Wilson rb = NULL; 185528cbd17SChris Wilson p = &obj->vma.tree.rb_node; 186528cbd17SChris Wilson while (*p) { 187528cbd17SChris Wilson struct i915_vma *pos; 188528cbd17SChris Wilson long cmp; 189528cbd17SChris Wilson 190528cbd17SChris Wilson rb = *p; 191528cbd17SChris Wilson pos = rb_entry(rb, struct i915_vma, obj_node); 192528cbd17SChris Wilson 193528cbd17SChris Wilson /* 194528cbd17SChris Wilson * If the view already exists in the tree, another thread 195528cbd17SChris Wilson * already created a matching vma, so return the older instance 196528cbd17SChris Wilson * and dispose of ours. 197528cbd17SChris Wilson */ 198528cbd17SChris Wilson cmp = i915_vma_compare(pos, vm, view); 199528cbd17SChris Wilson if (cmp == 0) { 200528cbd17SChris Wilson spin_unlock(&obj->vma.lock); 20113f1bfd3SChris Wilson i915_vma_free(vma); 202528cbd17SChris Wilson return pos; 203528cbd17SChris Wilson } 204528cbd17SChris Wilson 205528cbd17SChris Wilson if (cmp < 0) 206528cbd17SChris Wilson p = &rb->rb_right; 207528cbd17SChris Wilson else 208528cbd17SChris Wilson p = &rb->rb_left; 209528cbd17SChris Wilson } 210528cbd17SChris Wilson rb_link_node(&vma->obj_node, rb, p); 211528cbd17SChris Wilson rb_insert_color(&vma->obj_node, &obj->vma.tree); 212528cbd17SChris Wilson 213528cbd17SChris Wilson if (i915_vma_is_ggtt(vma)) 214e2189dd0SChris Wilson /* 215e2189dd0SChris Wilson * We put the GGTT vma at the start of the vma-list, followed 216e2189dd0SChris Wilson * by the ppGGTT vma. 
This allows us to break early when 217e2189dd0SChris Wilson * iterating over only the GGTT vma for an object, see 218e2189dd0SChris Wilson * for_each_ggtt_vma() 219e2189dd0SChris Wilson */ 220528cbd17SChris Wilson list_add(&vma->obj_link, &obj->vma.list); 221b42fe9caSJoonas Lahtinen else 222528cbd17SChris Wilson list_add_tail(&vma->obj_link, &obj->vma.list); 223528cbd17SChris Wilson 224528cbd17SChris Wilson spin_unlock(&obj->vma.lock); 22509d7e46bSChris Wilson 226b42fe9caSJoonas Lahtinen return vma; 2271fcdaa7eSChris Wilson 228cb593e5dSChris Wilson err_unlock: 229cb593e5dSChris Wilson spin_unlock(&obj->vma.lock); 2301fcdaa7eSChris Wilson err_vma: 23113f1bfd3SChris Wilson i915_vma_free(vma); 2321fcdaa7eSChris Wilson return ERR_PTR(-E2BIG); 233b42fe9caSJoonas Lahtinen } 234b42fe9caSJoonas Lahtinen 235481a6f7dSChris Wilson static struct i915_vma * 236481a6f7dSChris Wilson vma_lookup(struct drm_i915_gem_object *obj, 237718659a6SChris Wilson struct i915_address_space *vm, 238718659a6SChris Wilson const struct i915_ggtt_view *view) 239718659a6SChris Wilson { 240718659a6SChris Wilson struct rb_node *rb; 241718659a6SChris Wilson 242528cbd17SChris Wilson rb = obj->vma.tree.rb_node; 243718659a6SChris Wilson while (rb) { 244718659a6SChris Wilson struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); 245718659a6SChris Wilson long cmp; 246718659a6SChris Wilson 247718659a6SChris Wilson cmp = i915_vma_compare(vma, vm, view); 248718659a6SChris Wilson if (cmp == 0) 249718659a6SChris Wilson return vma; 250718659a6SChris Wilson 251718659a6SChris Wilson if (cmp < 0) 252718659a6SChris Wilson rb = rb->rb_right; 253718659a6SChris Wilson else 254718659a6SChris Wilson rb = rb->rb_left; 255718659a6SChris Wilson } 256718659a6SChris Wilson 257718659a6SChris Wilson return NULL; 258718659a6SChris Wilson } 259718659a6SChris Wilson 260718659a6SChris Wilson /** 261718659a6SChris Wilson * i915_vma_instance - return the singleton instance of the VMA 262718659a6SChris Wilson * @obj: parent &struct drm_i915_gem_object to be mapped 263718659a6SChris Wilson * @vm: address space in which the mapping is located 264718659a6SChris Wilson * @view: additional mapping requirements 265718659a6SChris Wilson * 266718659a6SChris Wilson * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with 267718659a6SChris Wilson * the same @view characteristics. If a match is not found, one is created. 268718659a6SChris Wilson * Once created, the VMA is kept until either the object is freed, or the 269718659a6SChris Wilson * address space is closed. 270718659a6SChris Wilson * 271718659a6SChris Wilson * Returns the vma, or an error pointer. 
272718659a6SChris Wilson */ 273718659a6SChris Wilson struct i915_vma * 274718659a6SChris Wilson i915_vma_instance(struct drm_i915_gem_object *obj, 275718659a6SChris Wilson struct i915_address_space *vm, 276718659a6SChris Wilson const struct i915_ggtt_view *view) 277718659a6SChris Wilson { 278718659a6SChris Wilson struct i915_vma *vma; 279718659a6SChris Wilson 280718659a6SChris Wilson GEM_BUG_ON(view && !i915_is_ggtt(vm)); 2812850748eSChris Wilson GEM_BUG_ON(!atomic_read(&vm->open)); 282718659a6SChris Wilson 283528cbd17SChris Wilson spin_lock(&obj->vma.lock); 284481a6f7dSChris Wilson vma = vma_lookup(obj, vm, view); 285528cbd17SChris Wilson spin_unlock(&obj->vma.lock); 286528cbd17SChris Wilson 287528cbd17SChris Wilson /* vma_create() will resolve the race if another creates the vma */ 288528cbd17SChris Wilson if (unlikely(!vma)) 289a01cb37aSChris Wilson vma = vma_create(obj, vm, view); 290718659a6SChris Wilson 2914ea9527cSChris Wilson GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); 292718659a6SChris Wilson return vma; 293718659a6SChris Wilson } 294718659a6SChris Wilson 2952850748eSChris Wilson struct i915_vma_work { 2962850748eSChris Wilson struct dma_fence_work base; 2972850748eSChris Wilson struct i915_vma *vma; 29854d7195fSChris Wilson struct drm_i915_gem_object *pinned; 299e3793468SChris Wilson struct i915_sw_dma_fence_cb cb; 3002850748eSChris Wilson enum i915_cache_level cache_level; 3012850748eSChris Wilson unsigned int flags; 3022850748eSChris Wilson }; 3032850748eSChris Wilson 3042850748eSChris Wilson static int __vma_bind(struct dma_fence_work *work) 3052850748eSChris Wilson { 3062850748eSChris Wilson struct i915_vma_work *vw = container_of(work, typeof(*vw), base); 3072850748eSChris Wilson struct i915_vma *vma = vw->vma; 3082850748eSChris Wilson int err; 3092850748eSChris Wilson 3102850748eSChris Wilson err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags); 3112850748eSChris Wilson if (err) 3122850748eSChris Wilson atomic_or(I915_VMA_ERROR, &vma->flags); 3132850748eSChris Wilson 3142850748eSChris Wilson return err; 3152850748eSChris Wilson } 3162850748eSChris Wilson 31754d7195fSChris Wilson static void __vma_release(struct dma_fence_work *work) 31854d7195fSChris Wilson { 31954d7195fSChris Wilson struct i915_vma_work *vw = container_of(work, typeof(*vw), base); 32054d7195fSChris Wilson 32154d7195fSChris Wilson if (vw->pinned) 32254d7195fSChris Wilson __i915_gem_object_unpin_pages(vw->pinned); 32354d7195fSChris Wilson } 32454d7195fSChris Wilson 3252850748eSChris Wilson static const struct dma_fence_work_ops bind_ops = { 3262850748eSChris Wilson .name = "bind", 3272850748eSChris Wilson .work = __vma_bind, 32854d7195fSChris Wilson .release = __vma_release, 3292850748eSChris Wilson }; 3302850748eSChris Wilson 3312850748eSChris Wilson struct i915_vma_work *i915_vma_work(void) 3322850748eSChris Wilson { 3332850748eSChris Wilson struct i915_vma_work *vw; 3342850748eSChris Wilson 3352850748eSChris Wilson vw = kzalloc(sizeof(*vw), GFP_KERNEL); 3362850748eSChris Wilson if (!vw) 3372850748eSChris Wilson return NULL; 3382850748eSChris Wilson 3392850748eSChris Wilson dma_fence_work_init(&vw->base, &bind_ops); 3402850748eSChris Wilson vw->base.dma.error = -EAGAIN; /* disable the worker by default */ 3412850748eSChris Wilson 3422850748eSChris Wilson return vw; 3432850748eSChris Wilson } 3442850748eSChris Wilson 345e3793468SChris Wilson int i915_vma_wait_for_bind(struct i915_vma *vma) 346e3793468SChris Wilson { 347e3793468SChris Wilson int err = 0; 348e3793468SChris Wilson 
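/*
 * Added note: an asynchronous bind publishes its fence as the exclusive
 * fence on vma->active (see i915_vma_bind()); if one is still pending,
 * wait for it to signal before treating the vma as bound.
 */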
349e3793468SChris Wilson if (rcu_access_pointer(vma->active.excl.fence)) { 350e3793468SChris Wilson struct dma_fence *fence; 351e3793468SChris Wilson 352e3793468SChris Wilson rcu_read_lock(); 353e3793468SChris Wilson fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); 354e3793468SChris Wilson rcu_read_unlock(); 355e3793468SChris Wilson if (fence) { 356e3793468SChris Wilson err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); 357e3793468SChris Wilson dma_fence_put(fence); 358e3793468SChris Wilson } 359e3793468SChris Wilson } 360e3793468SChris Wilson 361e3793468SChris Wilson return err; 362e3793468SChris Wilson } 363e3793468SChris Wilson 364718659a6SChris Wilson /** 365b42fe9caSJoonas Lahtinen * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 366b42fe9caSJoonas Lahtinen * @vma: VMA to map 367b42fe9caSJoonas Lahtinen * @cache_level: mapping cache level 368b42fe9caSJoonas Lahtinen * @flags: flags like global or local mapping 3692850748eSChris Wilson * @work: preallocated worker for allocating and binding the PTE 370b42fe9caSJoonas Lahtinen * 371b42fe9caSJoonas Lahtinen * DMA addresses are taken from the scatter-gather table of this object (or of 372b42fe9caSJoonas Lahtinen * this VMA in case of non-default GGTT views) and PTE entries set up. 373b42fe9caSJoonas Lahtinen * Note that DMA addresses are also the only part of the SG table we care about. 374b42fe9caSJoonas Lahtinen */ 3752850748eSChris Wilson int i915_vma_bind(struct i915_vma *vma, 3762850748eSChris Wilson enum i915_cache_level cache_level, 3772850748eSChris Wilson u32 flags, 3782850748eSChris Wilson struct i915_vma_work *work) 379b42fe9caSJoonas Lahtinen { 380b42fe9caSJoonas Lahtinen u32 bind_flags; 381b42fe9caSJoonas Lahtinen u32 vma_flags; 382b42fe9caSJoonas Lahtinen int ret; 383b42fe9caSJoonas Lahtinen 384aa149431SChris Wilson GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 385aa149431SChris Wilson GEM_BUG_ON(vma->size > vma->node.size); 386aa149431SChris Wilson 387bbb8a9d7STvrtko Ursulin if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, 388aa149431SChris Wilson vma->node.size, 389aa149431SChris Wilson vma->vm->total))) 390aa149431SChris Wilson return -ENODEV; 391aa149431SChris Wilson 392bbb8a9d7STvrtko Ursulin if (GEM_DEBUG_WARN_ON(!flags)) 393b42fe9caSJoonas Lahtinen return -EINVAL; 394b42fe9caSJoonas Lahtinen 3952850748eSChris Wilson bind_flags = flags; 3962850748eSChris Wilson bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; 397b42fe9caSJoonas Lahtinen 3984dd2fbbfSChris Wilson vma_flags = atomic_read(&vma->flags); 3994dd2fbbfSChris Wilson vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; 400b42fe9caSJoonas Lahtinen if (flags & PIN_UPDATE) 401b42fe9caSJoonas Lahtinen bind_flags |= vma_flags; 402b42fe9caSJoonas Lahtinen else 403b42fe9caSJoonas Lahtinen bind_flags &= ~vma_flags; 404b42fe9caSJoonas Lahtinen if (bind_flags == 0) 405b42fe9caSJoonas Lahtinen return 0; 406b42fe9caSJoonas Lahtinen 407fa3f46afSMatthew Auld GEM_BUG_ON(!vma->pages); 408fa3f46afSMatthew Auld 4096146e6daSDaniele Ceraolo Spurio trace_i915_vma_bind(vma, bind_flags); 4102850748eSChris Wilson if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) { 411e3793468SChris Wilson struct dma_fence *prev; 412e3793468SChris Wilson 4132850748eSChris Wilson work->vma = vma; 4142850748eSChris Wilson work->cache_level = cache_level; 4152850748eSChris Wilson work->flags = bind_flags | I915_VMA_ALLOC; 4162850748eSChris Wilson 4172850748eSChris Wilson /* 4182850748eSChris Wilson * Note we only want to chain up to 
the migration fence on 4192850748eSChris Wilson * the pages (not the object itself). As we don't track that, 4202850748eSChris Wilson * yet, we have to use the exclusive fence instead. 4212850748eSChris Wilson * 4222850748eSChris Wilson * Also note that we do not want to track the async vma as 4232850748eSChris Wilson * part of the obj->resv->excl_fence as it only affects 4242850748eSChris Wilson * execution and not content or object's backing store lifetime. 4252850748eSChris Wilson */ 426e3793468SChris Wilson prev = i915_active_set_exclusive(&vma->active, &work->base.dma); 42730ca04e1SChris Wilson if (prev) { 428e3793468SChris Wilson __i915_sw_fence_await_dma_fence(&work->base.chain, 429e3793468SChris Wilson prev, 430e3793468SChris Wilson &work->cb); 43130ca04e1SChris Wilson dma_fence_put(prev); 43230ca04e1SChris Wilson } 433e3793468SChris Wilson 4342850748eSChris Wilson work->base.dma.error = 0; /* enable the queue_work() */ 4352850748eSChris Wilson 43654d7195fSChris Wilson if (vma->obj) { 4372850748eSChris Wilson __i915_gem_object_pin_pages(vma->obj); 43854d7195fSChris Wilson work->pinned = vma->obj; 43954d7195fSChris Wilson } 4402850748eSChris Wilson } else { 44193f2cde2SChris Wilson ret = vma->ops->bind_vma(vma, cache_level, bind_flags); 442b42fe9caSJoonas Lahtinen if (ret) 443b42fe9caSJoonas Lahtinen return ret; 4442850748eSChris Wilson } 445b42fe9caSJoonas Lahtinen 4464dd2fbbfSChris Wilson atomic_or(bind_flags, &vma->flags); 447b42fe9caSJoonas Lahtinen return 0; 448b42fe9caSJoonas Lahtinen } 449b42fe9caSJoonas Lahtinen 450b42fe9caSJoonas Lahtinen void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) 451b42fe9caSJoonas Lahtinen { 452b42fe9caSJoonas Lahtinen void __iomem *ptr; 453b4563f59SChris Wilson int err; 454b42fe9caSJoonas Lahtinen 4552850748eSChris Wilson if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { 456b4563f59SChris Wilson err = -ENODEV; 457b4563f59SChris Wilson goto err; 458b4563f59SChris Wilson } 459b42fe9caSJoonas Lahtinen 460b42fe9caSJoonas Lahtinen GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 4614dd2fbbfSChris Wilson GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); 462b42fe9caSJoonas Lahtinen 4632850748eSChris Wilson ptr = READ_ONCE(vma->iomap); 464b42fe9caSJoonas Lahtinen if (ptr == NULL) { 46573ebd503SMatthew Auld ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, 466b42fe9caSJoonas Lahtinen vma->node.start, 467b42fe9caSJoonas Lahtinen vma->node.size); 468b4563f59SChris Wilson if (ptr == NULL) { 469b4563f59SChris Wilson err = -ENOMEM; 470b4563f59SChris Wilson goto err; 471b4563f59SChris Wilson } 472b42fe9caSJoonas Lahtinen 4732850748eSChris Wilson if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { 4742850748eSChris Wilson io_mapping_unmap(ptr); 4752850748eSChris Wilson ptr = vma->iomap; 4762850748eSChris Wilson } 477b42fe9caSJoonas Lahtinen } 478b42fe9caSJoonas Lahtinen 479b42fe9caSJoonas Lahtinen __i915_vma_pin(vma); 480b4563f59SChris Wilson 4813bd40735SChris Wilson err = i915_vma_pin_fence(vma); 482b4563f59SChris Wilson if (err) 483b4563f59SChris Wilson goto err_unpin; 484b4563f59SChris Wilson 4857125397bSChris Wilson i915_vma_set_ggtt_write(vma); 486a5972e93SChris Wilson 487a5972e93SChris Wilson /* NB Access through the GTT requires the device to be awake. 
*/ 488b42fe9caSJoonas Lahtinen return ptr; 489b4563f59SChris Wilson 490b4563f59SChris Wilson err_unpin: 491b4563f59SChris Wilson __i915_vma_unpin(vma); 492b4563f59SChris Wilson err: 493b4563f59SChris Wilson return IO_ERR_PTR(err); 494b4563f59SChris Wilson } 495b4563f59SChris Wilson 4967125397bSChris Wilson void i915_vma_flush_writes(struct i915_vma *vma) 4977125397bSChris Wilson { 4982850748eSChris Wilson if (i915_vma_unset_ggtt_write(vma)) 499a1c8a09eSTvrtko Ursulin intel_gt_flush_ggtt_writes(vma->vm->gt); 5007125397bSChris Wilson } 5017125397bSChris Wilson 502b4563f59SChris Wilson void i915_vma_unpin_iomap(struct i915_vma *vma) 503b4563f59SChris Wilson { 504b4563f59SChris Wilson GEM_BUG_ON(vma->iomap == NULL); 505b4563f59SChris Wilson 5067125397bSChris Wilson i915_vma_flush_writes(vma); 5077125397bSChris Wilson 508b4563f59SChris Wilson i915_vma_unpin_fence(vma); 509b4563f59SChris Wilson i915_vma_unpin(vma); 510b42fe9caSJoonas Lahtinen } 511b42fe9caSJoonas Lahtinen 5126a2f59e4SChris Wilson void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) 513b42fe9caSJoonas Lahtinen { 514b42fe9caSJoonas Lahtinen struct i915_vma *vma; 515b42fe9caSJoonas Lahtinen struct drm_i915_gem_object *obj; 516b42fe9caSJoonas Lahtinen 517b42fe9caSJoonas Lahtinen vma = fetch_and_zero(p_vma); 518b42fe9caSJoonas Lahtinen if (!vma) 519b42fe9caSJoonas Lahtinen return; 520b42fe9caSJoonas Lahtinen 521b42fe9caSJoonas Lahtinen obj = vma->obj; 522520ea7c5SChris Wilson GEM_BUG_ON(!obj); 523b42fe9caSJoonas Lahtinen 524b42fe9caSJoonas Lahtinen i915_vma_unpin(vma); 525b42fe9caSJoonas Lahtinen 5266a2f59e4SChris Wilson if (flags & I915_VMA_RELEASE_MAP) 5276a2f59e4SChris Wilson i915_gem_object_unpin_map(obj); 5286a2f59e4SChris Wilson 529c017cf6bSChris Wilson i915_gem_object_put(obj); 530b42fe9caSJoonas Lahtinen } 531b42fe9caSJoonas Lahtinen 532782a3e9eSChris Wilson bool i915_vma_misplaced(const struct i915_vma *vma, 533782a3e9eSChris Wilson u64 size, u64 alignment, u64 flags) 534b42fe9caSJoonas Lahtinen { 535b42fe9caSJoonas Lahtinen if (!drm_mm_node_allocated(&vma->node)) 536b42fe9caSJoonas Lahtinen return false; 537b42fe9caSJoonas Lahtinen 5382850748eSChris Wilson if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma))) 5392850748eSChris Wilson return true; 5402850748eSChris Wilson 541b42fe9caSJoonas Lahtinen if (vma->node.size < size) 542b42fe9caSJoonas Lahtinen return true; 543b42fe9caSJoonas Lahtinen 544f51455d4SChris Wilson GEM_BUG_ON(alignment && !is_power_of_2(alignment)); 545f51455d4SChris Wilson if (alignment && !IS_ALIGNED(vma->node.start, alignment)) 546b42fe9caSJoonas Lahtinen return true; 547b42fe9caSJoonas Lahtinen 548b42fe9caSJoonas Lahtinen if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) 549b42fe9caSJoonas Lahtinen return true; 550b42fe9caSJoonas Lahtinen 551b42fe9caSJoonas Lahtinen if (flags & PIN_OFFSET_BIAS && 552b42fe9caSJoonas Lahtinen vma->node.start < (flags & PIN_OFFSET_MASK)) 553b42fe9caSJoonas Lahtinen return true; 554b42fe9caSJoonas Lahtinen 555b42fe9caSJoonas Lahtinen if (flags & PIN_OFFSET_FIXED && 556b42fe9caSJoonas Lahtinen vma->node.start != (flags & PIN_OFFSET_MASK)) 557b42fe9caSJoonas Lahtinen return true; 558b42fe9caSJoonas Lahtinen 559b42fe9caSJoonas Lahtinen return false; 560b42fe9caSJoonas Lahtinen } 561b42fe9caSJoonas Lahtinen 562b42fe9caSJoonas Lahtinen void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) 563b42fe9caSJoonas Lahtinen { 564b42fe9caSJoonas Lahtinen bool mappable, fenceable; 565b42fe9caSJoonas Lahtinen 566944397f0SChris Wilson 
GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 567944397f0SChris Wilson GEM_BUG_ON(!vma->fence_size); 568b42fe9caSJoonas Lahtinen 569944397f0SChris Wilson fenceable = (vma->node.size >= vma->fence_size && 570f51455d4SChris Wilson IS_ALIGNED(vma->node.start, vma->fence_alignment)); 571944397f0SChris Wilson 572944397f0SChris Wilson mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; 573944397f0SChris Wilson 574944397f0SChris Wilson if (mappable && fenceable) 5754dd2fbbfSChris Wilson set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); 576b42fe9caSJoonas Lahtinen else 5774dd2fbbfSChris Wilson clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); 578b42fe9caSJoonas Lahtinen } 579b42fe9caSJoonas Lahtinen 58033dd8899SMatthew Auld bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) 5817d1d9aeaSChris Wilson { 5827d1d9aeaSChris Wilson struct drm_mm_node *node = &vma->node; 583b42fe9caSJoonas Lahtinen struct drm_mm_node *other; 584b42fe9caSJoonas Lahtinen 585b42fe9caSJoonas Lahtinen /* 586b42fe9caSJoonas Lahtinen * On some machines we have to be careful when putting differing types 587b42fe9caSJoonas Lahtinen * of snoopable memory together to avoid the prefetcher crossing memory 588b42fe9caSJoonas Lahtinen * domains and dying. During vm initialisation, we decide whether or not 589b42fe9caSJoonas Lahtinen * these constraints apply and set the drm_mm.color_adjust 590b42fe9caSJoonas Lahtinen * appropriately. 591b42fe9caSJoonas Lahtinen */ 59233dd8899SMatthew Auld if (!i915_vm_has_cache_coloring(vma->vm)) 593b42fe9caSJoonas Lahtinen return true; 594b42fe9caSJoonas Lahtinen 5957d1d9aeaSChris Wilson /* Only valid to be called on an already inserted vma */ 5967d1d9aeaSChris Wilson GEM_BUG_ON(!drm_mm_node_allocated(node)); 5977d1d9aeaSChris Wilson GEM_BUG_ON(list_empty(&node->node_list)); 598b42fe9caSJoonas Lahtinen 5997d1d9aeaSChris Wilson other = list_prev_entry(node, node_list); 60033dd8899SMatthew Auld if (i915_node_color_differs(other, color) && 6011e0a96e5SMatthew Auld !drm_mm_hole_follows(other)) 602b42fe9caSJoonas Lahtinen return false; 603b42fe9caSJoonas Lahtinen 6047d1d9aeaSChris Wilson other = list_next_entry(node, node_list); 60533dd8899SMatthew Auld if (i915_node_color_differs(other, color) && 6061e0a96e5SMatthew Auld !drm_mm_hole_follows(node)) 607b42fe9caSJoonas Lahtinen return false; 608b42fe9caSJoonas Lahtinen 609b42fe9caSJoonas Lahtinen return true; 610b42fe9caSJoonas Lahtinen } 611b42fe9caSJoonas Lahtinen 612b42fe9caSJoonas Lahtinen /** 613b42fe9caSJoonas Lahtinen * i915_vma_insert - finds a slot for the vma in its address space 614b42fe9caSJoonas Lahtinen * @vma: the vma 615b42fe9caSJoonas Lahtinen * @size: requested size in bytes (can be larger than the VMA) 616b42fe9caSJoonas Lahtinen * @alignment: required alignment 617b42fe9caSJoonas Lahtinen * @flags: mask of PIN_* flags to use 618b42fe9caSJoonas Lahtinen * 619b42fe9caSJoonas Lahtinen * First we try to allocate some free space that meets the requirements for 620b42fe9caSJoonas Lahtinen * the VMA. Failiing that, if the flags permit, it will evict an old VMA, 621b42fe9caSJoonas Lahtinen * preferrably the oldest idle entry to make room for the new VMA. 622b42fe9caSJoonas Lahtinen * 623b42fe9caSJoonas Lahtinen * Returns: 624b42fe9caSJoonas Lahtinen * 0 on success, negative error code otherwise. 
625b42fe9caSJoonas Lahtinen */ 626b42fe9caSJoonas Lahtinen static int 627b42fe9caSJoonas Lahtinen i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) 628b42fe9caSJoonas Lahtinen { 62933dd8899SMatthew Auld unsigned long color; 630b42fe9caSJoonas Lahtinen u64 start, end; 631b42fe9caSJoonas Lahtinen int ret; 632b42fe9caSJoonas Lahtinen 6334dd2fbbfSChris Wilson GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 634b42fe9caSJoonas Lahtinen GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); 635b42fe9caSJoonas Lahtinen 636b42fe9caSJoonas Lahtinen size = max(size, vma->size); 637944397f0SChris Wilson alignment = max(alignment, vma->display_alignment); 638944397f0SChris Wilson if (flags & PIN_MAPPABLE) { 639944397f0SChris Wilson size = max_t(typeof(size), size, vma->fence_size); 640944397f0SChris Wilson alignment = max_t(typeof(alignment), 641944397f0SChris Wilson alignment, vma->fence_alignment); 642944397f0SChris Wilson } 643b42fe9caSJoonas Lahtinen 644f51455d4SChris Wilson GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 645f51455d4SChris Wilson GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); 646f51455d4SChris Wilson GEM_BUG_ON(!is_power_of_2(alignment)); 647f51455d4SChris Wilson 648b42fe9caSJoonas Lahtinen start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 649f51455d4SChris Wilson GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); 650b42fe9caSJoonas Lahtinen 651b42fe9caSJoonas Lahtinen end = vma->vm->total; 652b42fe9caSJoonas Lahtinen if (flags & PIN_MAPPABLE) 6532850748eSChris Wilson end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); 654b42fe9caSJoonas Lahtinen if (flags & PIN_ZONE_4G) 655f51455d4SChris Wilson end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); 656f51455d4SChris Wilson GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); 657b42fe9caSJoonas Lahtinen 658b42fe9caSJoonas Lahtinen /* If binding the object/GGTT view requires more space than the entire 659b42fe9caSJoonas Lahtinen * aperture has, reject it early before evicting everything in a vain 660b42fe9caSJoonas Lahtinen * attempt to find space. 661b42fe9caSJoonas Lahtinen */ 662b42fe9caSJoonas Lahtinen if (size > end) { 663520ea7c5SChris Wilson DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", 664520ea7c5SChris Wilson size, flags & PIN_MAPPABLE ? 
"mappable" : "total", 665b42fe9caSJoonas Lahtinen end); 6662889caa9SChris Wilson return -ENOSPC; 667b42fe9caSJoonas Lahtinen } 668b42fe9caSJoonas Lahtinen 66933dd8899SMatthew Auld color = 0; 6702850748eSChris Wilson if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) 67133dd8899SMatthew Auld color = vma->obj->cache_level; 672fa3f46afSMatthew Auld 673b42fe9caSJoonas Lahtinen if (flags & PIN_OFFSET_FIXED) { 674b42fe9caSJoonas Lahtinen u64 offset = flags & PIN_OFFSET_MASK; 675f51455d4SChris Wilson if (!IS_ALIGNED(offset, alignment) || 6762850748eSChris Wilson range_overflows(offset, size, end)) 6772850748eSChris Wilson return -EINVAL; 678b42fe9caSJoonas Lahtinen 679625d988aSChris Wilson ret = i915_gem_gtt_reserve(vma->vm, &vma->node, 68033dd8899SMatthew Auld size, offset, color, 681625d988aSChris Wilson flags); 682b42fe9caSJoonas Lahtinen if (ret) 6832850748eSChris Wilson return ret; 684b42fe9caSJoonas Lahtinen } else { 6857464284bSMatthew Auld /* 6867464284bSMatthew Auld * We only support huge gtt pages through the 48b PPGTT, 6877464284bSMatthew Auld * however we also don't want to force any alignment for 6887464284bSMatthew Auld * objects which need to be tightly packed into the low 32bits. 6897464284bSMatthew Auld * 6907464284bSMatthew Auld * Note that we assume that GGTT are limited to 4GiB for the 6917464284bSMatthew Auld * forseeable future. See also i915_ggtt_offset(). 6927464284bSMatthew Auld */ 6937464284bSMatthew Auld if (upper_32_bits(end - 1) && 6947464284bSMatthew Auld vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 695855822beSMatthew Auld /* 696855822beSMatthew Auld * We can't mix 64K and 4K PTEs in the same page-table 697855822beSMatthew Auld * (2M block), and so to avoid the ugliness and 698855822beSMatthew Auld * complexity of coloring we opt for just aligning 64K 699855822beSMatthew Auld * objects to 2M. 700855822beSMatthew Auld */ 7017464284bSMatthew Auld u64 page_alignment = 702855822beSMatthew Auld rounddown_pow_of_two(vma->page_sizes.sg | 703855822beSMatthew Auld I915_GTT_PAGE_SIZE_2M); 7047464284bSMatthew Auld 705bef27bdbSChris Wilson /* 706bef27bdbSChris Wilson * Check we don't expand for the limited Global GTT 707bef27bdbSChris Wilson * (mappable aperture is even more precious!). This 708bef27bdbSChris Wilson * also checks that we exclude the aliasing-ppgtt. 
709bef27bdbSChris Wilson */ 710bef27bdbSChris Wilson GEM_BUG_ON(i915_vma_is_ggtt(vma)); 711bef27bdbSChris Wilson 7127464284bSMatthew Auld alignment = max(alignment, page_alignment); 713855822beSMatthew Auld 714855822beSMatthew Auld if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) 715855822beSMatthew Auld size = round_up(size, I915_GTT_PAGE_SIZE_2M); 7167464284bSMatthew Auld } 7177464284bSMatthew Auld 718e007b19dSChris Wilson ret = i915_gem_gtt_insert(vma->vm, &vma->node, 71933dd8899SMatthew Auld size, alignment, color, 720e007b19dSChris Wilson start, end, flags); 721e007b19dSChris Wilson if (ret) 7222850748eSChris Wilson return ret; 723b42fe9caSJoonas Lahtinen 724b42fe9caSJoonas Lahtinen GEM_BUG_ON(vma->node.start < start); 725b42fe9caSJoonas Lahtinen GEM_BUG_ON(vma->node.start + vma->node.size > end); 726b42fe9caSJoonas Lahtinen } 72744a0ec0dSChris Wilson GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 72833dd8899SMatthew Auld GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); 729b42fe9caSJoonas Lahtinen 730dde01d94SChris Wilson list_add_tail(&vma->vm_link, &vma->vm->bound_list); 731b42fe9caSJoonas Lahtinen 732b42fe9caSJoonas Lahtinen return 0; 733b42fe9caSJoonas Lahtinen } 734b42fe9caSJoonas Lahtinen 73531c7effaSChris Wilson static void 736dde01d94SChris Wilson i915_vma_detach(struct i915_vma *vma) 73731c7effaSChris Wilson { 73831c7effaSChris Wilson GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 7394dd2fbbfSChris Wilson GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); 74031c7effaSChris Wilson 741520ea7c5SChris Wilson /* 742520ea7c5SChris Wilson * And finally now the object is completely decoupled from this 743520ea7c5SChris Wilson * vma, we can drop its hold on the backing storage and allow 744520ea7c5SChris Wilson * it to be reaped by the shrinker. 74531c7effaSChris Wilson */ 746dde01d94SChris Wilson list_del(&vma->vm_link); 747520ea7c5SChris Wilson } 74831c7effaSChris Wilson 7492850748eSChris Wilson static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) 750b42fe9caSJoonas Lahtinen { 7512850748eSChris Wilson unsigned int bound; 7522850748eSChris Wilson bool pinned = true; 753b42fe9caSJoonas Lahtinen 7542850748eSChris Wilson bound = atomic_read(&vma->flags); 7552850748eSChris Wilson do { 7562850748eSChris Wilson if (unlikely(flags & ~bound)) 7572850748eSChris Wilson return false; 758b42fe9caSJoonas Lahtinen 7592850748eSChris Wilson if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) 7602850748eSChris Wilson return false; 7612850748eSChris Wilson 7622850748eSChris Wilson if (!(bound & I915_VMA_PIN_MASK)) 7632850748eSChris Wilson goto unpinned; 7642850748eSChris Wilson 7652850748eSChris Wilson GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0); 7662850748eSChris Wilson } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); 7672850748eSChris Wilson 7682850748eSChris Wilson return true; 7692850748eSChris Wilson 7702850748eSChris Wilson unpinned: 7712850748eSChris Wilson /* 7722850748eSChris Wilson * If pin_count==0, but we are bound, check under the lock to avoid 7732850748eSChris Wilson * racing with a concurrent i915_vma_unbind(). 
7742850748eSChris Wilson */ 7752850748eSChris Wilson mutex_lock(&vma->vm->mutex); 7762850748eSChris Wilson do { 7772850748eSChris Wilson if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) { 7782850748eSChris Wilson pinned = false; 7792850748eSChris Wilson break; 780b42fe9caSJoonas Lahtinen } 781b42fe9caSJoonas Lahtinen 7822850748eSChris Wilson if (unlikely(flags & ~bound)) { 7832850748eSChris Wilson pinned = false; 7842850748eSChris Wilson break; 785b42fe9caSJoonas Lahtinen } 7862850748eSChris Wilson } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); 7872850748eSChris Wilson mutex_unlock(&vma->vm->mutex); 788b42fe9caSJoonas Lahtinen 7892850748eSChris Wilson return pinned; 7902850748eSChris Wilson } 791b42fe9caSJoonas Lahtinen 7922850748eSChris Wilson static int vma_get_pages(struct i915_vma *vma) 7932850748eSChris Wilson { 7942850748eSChris Wilson int err = 0; 795d36caeeaSChris Wilson 7962850748eSChris Wilson if (atomic_add_unless(&vma->pages_count, 1, 0)) 797b42fe9caSJoonas Lahtinen return 0; 798b42fe9caSJoonas Lahtinen 7992850748eSChris Wilson /* Allocations ahoy! */ 8002850748eSChris Wilson if (mutex_lock_interruptible(&vma->pages_mutex)) 8012850748eSChris Wilson return -EINTR; 8022850748eSChris Wilson 8032850748eSChris Wilson if (!atomic_read(&vma->pages_count)) { 8042850748eSChris Wilson if (vma->obj) { 8052850748eSChris Wilson err = i915_gem_object_pin_pages(vma->obj); 8062850748eSChris Wilson if (err) 8072850748eSChris Wilson goto unlock; 80831c7effaSChris Wilson } 8092850748eSChris Wilson 8102850748eSChris Wilson err = vma->ops->set_pages(vma); 81156184a20SChris Wilson if (err) { 81256184a20SChris Wilson if (vma->obj) 81356184a20SChris Wilson i915_gem_object_unpin_pages(vma->obj); 8142850748eSChris Wilson goto unlock; 8152850748eSChris Wilson } 81656184a20SChris Wilson } 8172850748eSChris Wilson atomic_inc(&vma->pages_count); 8182850748eSChris Wilson 8192850748eSChris Wilson unlock: 8202850748eSChris Wilson mutex_unlock(&vma->pages_mutex); 8212850748eSChris Wilson 8222850748eSChris Wilson return err; 8232850748eSChris Wilson } 8242850748eSChris Wilson 8252850748eSChris Wilson static void __vma_put_pages(struct i915_vma *vma, unsigned int count) 8262850748eSChris Wilson { 8272850748eSChris Wilson /* We allocate under vma_get_pages, so beware the shrinker */ 8282850748eSChris Wilson mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); 8292850748eSChris Wilson GEM_BUG_ON(atomic_read(&vma->pages_count) < count); 8302850748eSChris Wilson if (atomic_sub_return(count, &vma->pages_count) == 0) { 8312850748eSChris Wilson vma->ops->clear_pages(vma); 8322850748eSChris Wilson GEM_BUG_ON(vma->pages); 8332850748eSChris Wilson if (vma->obj) 8342850748eSChris Wilson i915_gem_object_unpin_pages(vma->obj); 8352850748eSChris Wilson } 8362850748eSChris Wilson mutex_unlock(&vma->pages_mutex); 8372850748eSChris Wilson } 8382850748eSChris Wilson 8392850748eSChris Wilson static void vma_put_pages(struct i915_vma *vma) 8402850748eSChris Wilson { 8412850748eSChris Wilson if (atomic_add_unless(&vma->pages_count, -1, 1)) 8422850748eSChris Wilson return; 8432850748eSChris Wilson 8442850748eSChris Wilson __vma_put_pages(vma, 1); 8452850748eSChris Wilson } 8462850748eSChris Wilson 8472850748eSChris Wilson static void vma_unbind_pages(struct i915_vma *vma) 8482850748eSChris Wilson { 8492850748eSChris Wilson unsigned int count; 8502850748eSChris Wilson 8512850748eSChris Wilson lockdep_assert_held(&vma->vm->mutex); 8522850748eSChris Wilson 8532850748eSChris Wilson /* The upper portion of 
pages_count is the number of bindings */ 8542850748eSChris Wilson count = atomic_read(&vma->pages_count); 8552850748eSChris Wilson count >>= I915_VMA_PAGES_BIAS; 8562850748eSChris Wilson GEM_BUG_ON(!count); 8572850748eSChris Wilson 8582850748eSChris Wilson __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); 8592850748eSChris Wilson } 8602850748eSChris Wilson 8612850748eSChris Wilson int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) 8622850748eSChris Wilson { 8632850748eSChris Wilson struct i915_vma_work *work = NULL; 864c0e60347SChris Wilson intel_wakeref_t wakeref = 0; 8652850748eSChris Wilson unsigned int bound; 8662850748eSChris Wilson int err; 8672850748eSChris Wilson 8682850748eSChris Wilson BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); 8692850748eSChris Wilson BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); 8702850748eSChris Wilson 8712850748eSChris Wilson GEM_BUG_ON(flags & PIN_UPDATE); 8722850748eSChris Wilson GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL))); 8732850748eSChris Wilson 8742850748eSChris Wilson /* First try and grab the pin without rebinding the vma */ 8752850748eSChris Wilson if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) 8762850748eSChris Wilson return 0; 8772850748eSChris Wilson 8782850748eSChris Wilson err = vma_get_pages(vma); 8792850748eSChris Wilson if (err) 8802850748eSChris Wilson return err; 8812850748eSChris Wilson 8822850748eSChris Wilson if (flags & vma->vm->bind_async_flags) { 8832850748eSChris Wilson work = i915_vma_work(); 8842850748eSChris Wilson if (!work) { 8852850748eSChris Wilson err = -ENOMEM; 8862850748eSChris Wilson goto err_pages; 8872850748eSChris Wilson } 8882850748eSChris Wilson } 8892850748eSChris Wilson 890c0e60347SChris Wilson if (flags & PIN_GLOBAL) 891c0e60347SChris Wilson wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); 892c0e60347SChris Wilson 893d0024911SChris Wilson /* 894d0024911SChris Wilson * Differentiate between user/kernel vma inside the aliasing-ppgtt. 895d0024911SChris Wilson * 896d0024911SChris Wilson * We conflate the Global GTT with the user's vma when using the 897d0024911SChris Wilson * aliasing-ppgtt, but it is still vitally important to try and 898d0024911SChris Wilson * keep the use cases distinct. For example, userptr objects are 899d0024911SChris Wilson * not allowed inside the Global GTT as that will cause lock 900d0024911SChris Wilson * inversions when we have to evict them the mmu_notifier callbacks - 901d0024911SChris Wilson * but they are allowed to be part of the user ppGTT which can never 902d0024911SChris Wilson * be mapped. As such we try to give the distinct users of the same 903d0024911SChris Wilson * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt 904d0024911SChris Wilson * and i915_ppgtt separate]. 905d0024911SChris Wilson * 906d0024911SChris Wilson * NB this may cause us to mask real lock inversions -- while the 907d0024911SChris Wilson * code is safe today, lockdep may not be able to spot future 908d0024911SChris Wilson * transgressions. 
909d0024911SChris Wilson */ 910d0024911SChris Wilson err = mutex_lock_interruptible_nested(&vma->vm->mutex, 911d0024911SChris Wilson !(flags & PIN_GLOBAL)); 9122850748eSChris Wilson if (err) 9132850748eSChris Wilson goto err_fence; 9142850748eSChris Wilson 915d0024911SChris Wilson /* No more allocations allowed now we hold vm->mutex */ 916d0024911SChris Wilson 91700de702cSChris Wilson if (unlikely(i915_vma_is_closed(vma))) { 91800de702cSChris Wilson err = -ENOENT; 91900de702cSChris Wilson goto err_unlock; 92000de702cSChris Wilson } 92100de702cSChris Wilson 9222850748eSChris Wilson bound = atomic_read(&vma->flags); 9232850748eSChris Wilson if (unlikely(bound & I915_VMA_ERROR)) { 9242850748eSChris Wilson err = -ENOMEM; 9252850748eSChris Wilson goto err_unlock; 9262850748eSChris Wilson } 9272850748eSChris Wilson 9282850748eSChris Wilson if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) { 9292850748eSChris Wilson err = -EAGAIN; /* pins are meant to be fairly temporary */ 9302850748eSChris Wilson goto err_unlock; 9312850748eSChris Wilson } 9322850748eSChris Wilson 9332850748eSChris Wilson if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) { 9342850748eSChris Wilson __i915_vma_pin(vma); 9352850748eSChris Wilson goto err_unlock; 9362850748eSChris Wilson } 9372850748eSChris Wilson 9382850748eSChris Wilson err = i915_active_acquire(&vma->active); 9392850748eSChris Wilson if (err) 9402850748eSChris Wilson goto err_unlock; 9412850748eSChris Wilson 9422850748eSChris Wilson if (!(bound & I915_VMA_BIND_MASK)) { 9432850748eSChris Wilson err = i915_vma_insert(vma, size, alignment, flags); 9442850748eSChris Wilson if (err) 9452850748eSChris Wilson goto err_active; 9462850748eSChris Wilson 9472850748eSChris Wilson if (i915_is_ggtt(vma->vm)) 9482850748eSChris Wilson __i915_vma_set_map_and_fenceable(vma); 9492850748eSChris Wilson } 9502850748eSChris Wilson 9512850748eSChris Wilson GEM_BUG_ON(!vma->pages); 9522850748eSChris Wilson err = i915_vma_bind(vma, 9532850748eSChris Wilson vma->obj ? 
vma->obj->cache_level : 0, 9542850748eSChris Wilson flags, work); 9552850748eSChris Wilson if (err) 9562850748eSChris Wilson goto err_remove; 9572850748eSChris Wilson 9582850748eSChris Wilson /* There should only be at most 2 active bindings (user, global) */ 9592850748eSChris Wilson GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound); 9602850748eSChris Wilson atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); 9612850748eSChris Wilson list_move_tail(&vma->vm_link, &vma->vm->bound_list); 9622850748eSChris Wilson 9632850748eSChris Wilson __i915_vma_pin(vma); 9642850748eSChris Wilson GEM_BUG_ON(!i915_vma_is_pinned(vma)); 9652850748eSChris Wilson GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); 9662850748eSChris Wilson GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 9672850748eSChris Wilson 9682850748eSChris Wilson err_remove: 969dde01d94SChris Wilson if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { 970dde01d94SChris Wilson i915_vma_detach(vma); 971dde01d94SChris Wilson drm_mm_remove_node(&vma->node); 972dde01d94SChris Wilson } 9732850748eSChris Wilson err_active: 9742850748eSChris Wilson i915_active_release(&vma->active); 9752850748eSChris Wilson err_unlock: 9762850748eSChris Wilson mutex_unlock(&vma->vm->mutex); 9772850748eSChris Wilson err_fence: 9782850748eSChris Wilson if (work) 97992581f9fSChris Wilson dma_fence_work_commit_imm(&work->base); 980c0e60347SChris Wilson if (wakeref) 981c0e60347SChris Wilson intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); 9822850748eSChris Wilson err_pages: 9832850748eSChris Wilson vma_put_pages(vma); 9842850748eSChris Wilson return err; 985b42fe9caSJoonas Lahtinen } 986b42fe9caSJoonas Lahtinen 987ccd20945SChris Wilson static void flush_idle_contexts(struct intel_gt *gt) 988ccd20945SChris Wilson { 989ccd20945SChris Wilson struct intel_engine_cs *engine; 990ccd20945SChris Wilson enum intel_engine_id id; 991ccd20945SChris Wilson 992ccd20945SChris Wilson for_each_engine(engine, gt, id) 993ccd20945SChris Wilson intel_engine_flush_barriers(engine); 994ccd20945SChris Wilson 995ccd20945SChris Wilson intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); 996ccd20945SChris Wilson } 997ccd20945SChris Wilson 998ccd20945SChris Wilson int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) 999ccd20945SChris Wilson { 1000ccd20945SChris Wilson struct i915_address_space *vm = vma->vm; 1001ccd20945SChris Wilson int err; 1002ccd20945SChris Wilson 1003ccd20945SChris Wilson GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 1004ccd20945SChris Wilson 1005ccd20945SChris Wilson do { 1006ccd20945SChris Wilson err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); 1007e3793468SChris Wilson if (err != -ENOSPC) { 1008e3793468SChris Wilson if (!err) { 1009e3793468SChris Wilson err = i915_vma_wait_for_bind(vma); 1010e3793468SChris Wilson if (err) 1011e3793468SChris Wilson i915_vma_unpin(vma); 1012e3793468SChris Wilson } 1013ccd20945SChris Wilson return err; 1014e3793468SChris Wilson } 1015ccd20945SChris Wilson 1016ccd20945SChris Wilson /* Unlike i915_vma_pin, we don't take no for an answer! 
*/ 1017ccd20945SChris Wilson flush_idle_contexts(vm->gt); 1018ccd20945SChris Wilson if (mutex_lock_interruptible(&vm->mutex) == 0) { 1019ccd20945SChris Wilson i915_gem_evict_vm(vm); 1020ccd20945SChris Wilson mutex_unlock(&vm->mutex); 1021ccd20945SChris Wilson } 1022ccd20945SChris Wilson } while (1); 1023ccd20945SChris Wilson } 1024ccd20945SChris Wilson 102550689771SChris Wilson static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) 10263365e226SChris Wilson { 10273365e226SChris Wilson /* 10283365e226SChris Wilson * We defer actually closing, unbinding and destroying the VMA until 10293365e226SChris Wilson * the next idle point, or if the object is freed in the meantime. By 10303365e226SChris Wilson * postponing the unbind, we allow for it to be resurrected by the 10313365e226SChris Wilson * client, avoiding the work required to rebind the VMA. This is 10323365e226SChris Wilson * advantageous for DRI, where the client/server pass objects 10333365e226SChris Wilson * between themselves, temporarily opening a local VMA to the 10343365e226SChris Wilson * object, and then closing it again. The same object is then reused 10353365e226SChris Wilson * on the next frame (or two, depending on the depth of the swap queue) 10363365e226SChris Wilson * causing us to rebind the VMA once more. This ends up being a lot 10373365e226SChris Wilson * of wasted work for the steady state. 10383365e226SChris Wilson */ 103950689771SChris Wilson GEM_BUG_ON(i915_vma_is_closed(vma)); 104071e51ca8SChris Wilson list_add(&vma->closed_link, >->closed_vma); 104150689771SChris Wilson } 104250689771SChris Wilson 104350689771SChris Wilson void i915_vma_close(struct i915_vma *vma) 104450689771SChris Wilson { 104550689771SChris Wilson struct intel_gt *gt = vma->vm->gt; 104650689771SChris Wilson unsigned long flags; 104750689771SChris Wilson 104850689771SChris Wilson if (i915_vma_is_ggtt(vma)) 104950689771SChris Wilson return; 105050689771SChris Wilson 105150689771SChris Wilson GEM_BUG_ON(!atomic_read(&vma->open_count)); 105250689771SChris Wilson if (atomic_dec_and_lock_irqsave(&vma->open_count, 105350689771SChris Wilson >->closed_lock, 105450689771SChris Wilson flags)) { 105550689771SChris Wilson __vma_close(vma, gt); 105671e51ca8SChris Wilson spin_unlock_irqrestore(>->closed_lock, flags); 1057155ab883SChris Wilson } 105850689771SChris Wilson } 1059155ab883SChris Wilson 1060155ab883SChris Wilson static void __i915_vma_remove_closed(struct i915_vma *vma) 1061155ab883SChris Wilson { 106271e51ca8SChris Wilson struct intel_gt *gt = vma->vm->gt; 1063155ab883SChris Wilson 106471e51ca8SChris Wilson spin_lock_irq(>->closed_lock); 1065155ab883SChris Wilson list_del_init(&vma->closed_link); 106671e51ca8SChris Wilson spin_unlock_irq(>->closed_lock); 10673365e226SChris Wilson } 10683365e226SChris Wilson 10693365e226SChris Wilson void i915_vma_reopen(struct i915_vma *vma) 10703365e226SChris Wilson { 10712850748eSChris Wilson if (i915_vma_is_closed(vma)) 1072155ab883SChris Wilson __i915_vma_remove_closed(vma); 10733365e226SChris Wilson } 10743365e226SChris Wilson 107576f9764cSChris Wilson void i915_vma_release(struct kref *ref) 1076b42fe9caSJoonas Lahtinen { 107776f9764cSChris Wilson struct i915_vma *vma = container_of(ref, typeof(*vma), ref); 107876f9764cSChris Wilson 10792850748eSChris Wilson if (drm_mm_node_allocated(&vma->node)) { 10802850748eSChris Wilson mutex_lock(&vma->vm->mutex); 10812850748eSChris Wilson atomic_and(~I915_VMA_PIN_MASK, &vma->flags); 10822850748eSChris Wilson WARN_ON(__i915_vma_unbind(vma)); 
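/*
 * Added note: the pin count was force-cleared just above, so on this
 * final put the unbind is expected to succeed (hence only a WARN_ON).
 */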
10832850748eSChris Wilson mutex_unlock(&vma->vm->mutex); 1084b290a78bSChris Wilson GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); 10852850748eSChris Wilson } 10862850748eSChris Wilson GEM_BUG_ON(i915_vma_is_active(vma)); 1087b42fe9caSJoonas Lahtinen 1088528cbd17SChris Wilson if (vma->obj) { 1089528cbd17SChris Wilson struct drm_i915_gem_object *obj = vma->obj; 1090528cbd17SChris Wilson 1091528cbd17SChris Wilson spin_lock(&obj->vma.lock); 1092528cbd17SChris Wilson list_del(&vma->obj_link); 10932850748eSChris Wilson rb_erase(&vma->obj_node, &obj->vma.tree); 1094528cbd17SChris Wilson spin_unlock(&obj->vma.lock); 1095528cbd17SChris Wilson } 1096010e3e68SChris Wilson 1097155ab883SChris Wilson __i915_vma_remove_closed(vma); 10982850748eSChris Wilson i915_vm_put(vma->vm); 10993365e226SChris Wilson 11002850748eSChris Wilson i915_active_fini(&vma->active); 11012850748eSChris Wilson i915_vma_free(vma); 11023365e226SChris Wilson } 11033365e226SChris Wilson 110471e51ca8SChris Wilson void i915_vma_parked(struct intel_gt *gt) 11053365e226SChris Wilson { 11063365e226SChris Wilson struct i915_vma *vma, *next; 11073447c4c5SChris Wilson LIST_HEAD(closed); 11083365e226SChris Wilson 110971e51ca8SChris Wilson spin_lock_irq(>->closed_lock); 111071e51ca8SChris Wilson list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) { 11112850748eSChris Wilson struct drm_i915_gem_object *obj = vma->obj; 11122850748eSChris Wilson struct i915_address_space *vm = vma->vm; 11132850748eSChris Wilson 11142850748eSChris Wilson /* XXX All to avoid keeping a reference on i915_vma itself */ 11152850748eSChris Wilson 11162850748eSChris Wilson if (!kref_get_unless_zero(&obj->base.refcount)) 11172850748eSChris Wilson continue; 11182850748eSChris Wilson 11193447c4c5SChris Wilson if (!i915_vm_tryopen(vm)) { 11202850748eSChris Wilson i915_gem_object_put(obj); 11213447c4c5SChris Wilson continue; 11222850748eSChris Wilson } 11232850748eSChris Wilson 11243447c4c5SChris Wilson list_move(&vma->closed_link, &closed); 11253447c4c5SChris Wilson } 112671e51ca8SChris Wilson spin_unlock_irq(>->closed_lock); 11273365e226SChris Wilson 11283447c4c5SChris Wilson /* As the GT is held idle, no vma can be reopened as we destroy them */ 11293447c4c5SChris Wilson list_for_each_entry_safe(vma, next, &closed, closed_link) { 11303447c4c5SChris Wilson struct drm_i915_gem_object *obj = vma->obj; 11313447c4c5SChris Wilson struct i915_address_space *vm = vma->vm; 11323447c4c5SChris Wilson 11333447c4c5SChris Wilson INIT_LIST_HEAD(&vma->closed_link); 113476f9764cSChris Wilson __i915_vma_put(vma); 11353447c4c5SChris Wilson 11362850748eSChris Wilson i915_gem_object_put(obj); 11372850748eSChris Wilson i915_vm_close(vm); 1138155ab883SChris Wilson } 1139b42fe9caSJoonas Lahtinen } 1140b42fe9caSJoonas Lahtinen 1141b42fe9caSJoonas Lahtinen static void __i915_vma_iounmap(struct i915_vma *vma) 1142b42fe9caSJoonas Lahtinen { 1143b42fe9caSJoonas Lahtinen GEM_BUG_ON(i915_vma_is_pinned(vma)); 1144b42fe9caSJoonas Lahtinen 1145b42fe9caSJoonas Lahtinen if (vma->iomap == NULL) 1146b42fe9caSJoonas Lahtinen return; 1147b42fe9caSJoonas Lahtinen 1148b42fe9caSJoonas Lahtinen io_mapping_unmap(vma->iomap); 1149b42fe9caSJoonas Lahtinen vma->iomap = NULL; 1150b42fe9caSJoonas Lahtinen } 1151b42fe9caSJoonas Lahtinen 1152a65adaf8SChris Wilson void i915_vma_revoke_mmap(struct i915_vma *vma) 1153a65adaf8SChris Wilson { 1154cc662126SAbdiel Janulgue struct drm_vma_offset_node *node; 1155a65adaf8SChris Wilson u64 vma_offset; 1156a65adaf8SChris Wilson 1157a65adaf8SChris Wilson if 
(!i915_vma_has_userfault(vma)) 1158a65adaf8SChris Wilson return; 1159a65adaf8SChris Wilson 1160a65adaf8SChris Wilson GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); 1161a65adaf8SChris Wilson GEM_BUG_ON(!vma->obj->userfault_count); 1162a65adaf8SChris Wilson 1163cc662126SAbdiel Janulgue node = &vma->mmo->vma_node; 1164a65adaf8SChris Wilson vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; 1165a65adaf8SChris Wilson unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, 1166a65adaf8SChris Wilson drm_vma_node_offset_addr(node) + vma_offset, 1167a65adaf8SChris Wilson vma->size, 1168a65adaf8SChris Wilson 1); 1169a65adaf8SChris Wilson 1170a65adaf8SChris Wilson i915_vma_unset_userfault(vma); 1171a65adaf8SChris Wilson if (!--vma->obj->userfault_count) 1172a65adaf8SChris Wilson list_del(&vma->obj->userfault_link); 1173a65adaf8SChris Wilson } 1174a65adaf8SChris Wilson 11752850748eSChris Wilson int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) 11762850748eSChris Wilson { 11772850748eSChris Wilson int err; 11782850748eSChris Wilson 11792850748eSChris Wilson GEM_BUG_ON(!i915_vma_is_pinned(vma)); 11802850748eSChris Wilson 11812850748eSChris Wilson /* Wait for the vma to be bound before we start! */ 1182442dbc5cSChris Wilson err = i915_request_await_active(rq, &vma->active, 1183442dbc5cSChris Wilson I915_ACTIVE_AWAIT_EXCL); 11842850748eSChris Wilson if (err) 11852850748eSChris Wilson return err; 11862850748eSChris Wilson 11872850748eSChris Wilson return i915_active_add_request(&vma->active, rq); 11882850748eSChris Wilson } 11892850748eSChris Wilson 1190e6bb1d7fSChris Wilson int i915_vma_move_to_active(struct i915_vma *vma, 1191e6bb1d7fSChris Wilson struct i915_request *rq, 1192e6bb1d7fSChris Wilson unsigned int flags) 1193e6bb1d7fSChris Wilson { 1194e6bb1d7fSChris Wilson struct drm_i915_gem_object *obj = vma->obj; 1195a93615f9SChris Wilson int err; 1196e6bb1d7fSChris Wilson 11976951e589SChris Wilson assert_object_held(obj); 1198e6bb1d7fSChris Wilson 11992850748eSChris Wilson err = __i915_vma_move_to_active(vma, rq); 1200a93615f9SChris Wilson if (unlikely(err)) 1201a93615f9SChris Wilson return err; 1202e6bb1d7fSChris Wilson 1203e6bb1d7fSChris Wilson if (flags & EXEC_OBJECT_WRITE) { 1204da42104fSChris Wilson struct intel_frontbuffer *front; 1205da42104fSChris Wilson 1206da42104fSChris Wilson front = __intel_frontbuffer_get(obj); 1207da42104fSChris Wilson if (unlikely(front)) { 1208da42104fSChris Wilson if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) 1209da42104fSChris Wilson i915_active_add_request(&front->write, rq); 1210da42104fSChris Wilson intel_frontbuffer_put(front); 1211da42104fSChris Wilson } 1212e6bb1d7fSChris Wilson 1213829e8defSRodrigo Vivi dma_resv_add_excl_fence(vma->resv, &rq->fence); 1214cd2a4eafSChris Wilson obj->write_domain = I915_GEM_DOMAIN_RENDER; 1215e6bb1d7fSChris Wilson obj->read_domains = 0; 1216cd2a4eafSChris Wilson } else { 1217829e8defSRodrigo Vivi err = dma_resv_reserve_shared(vma->resv, 1); 1218cd2a4eafSChris Wilson if (unlikely(err)) 1219cd2a4eafSChris Wilson return err; 1220cd2a4eafSChris Wilson 1221829e8defSRodrigo Vivi dma_resv_add_shared_fence(vma->resv, &rq->fence); 1222cd2a4eafSChris Wilson obj->write_domain = 0; 1223e6bb1d7fSChris Wilson } 122463baf4f3SChris Wilson 122563baf4f3SChris Wilson if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) 122663baf4f3SChris Wilson i915_active_add_request(&vma->fence->active, rq); 122763baf4f3SChris Wilson 1228e6bb1d7fSChris Wilson obj->read_domains |= I915_GEM_GPU_DOMAINS; 
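/*
 * The request may write to the object via the GPU, so mark the backing
 * pages as dirty: their contents must be preserved (written back, not
 * simply discarded) when the pages are eventually released.
 */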
1229a93615f9SChris Wilson obj->mm.dirty = true; 1230e6bb1d7fSChris Wilson 1231a93615f9SChris Wilson GEM_BUG_ON(!i915_vma_is_active(vma)); 1232e6bb1d7fSChris Wilson return 0; 1233e6bb1d7fSChris Wilson } 1234e6bb1d7fSChris Wilson 12352850748eSChris Wilson int __i915_vma_unbind(struct i915_vma *vma) 1236b42fe9caSJoonas Lahtinen { 1237b42fe9caSJoonas Lahtinen int ret; 1238b42fe9caSJoonas Lahtinen 12392850748eSChris Wilson lockdep_assert_held(&vma->vm->mutex); 1240b42fe9caSJoonas Lahtinen 124110195b1eSChris Wilson if (i915_vma_is_pinned(vma)) { 124210195b1eSChris Wilson vma_print_allocator(vma, "is pinned"); 1243d3e48352SChris Wilson return -EAGAIN; 124410195b1eSChris Wilson } 1245b42fe9caSJoonas Lahtinen 124660e94557SChris Wilson /* 124760e94557SChris Wilson * After confirming that no one else is pinning this vma, wait for 124860e94557SChris Wilson * any laggards who may have crept in during the wait (through 124960e94557SChris Wilson * a residual pin skipping the vm->mutex) to complete. 125060e94557SChris Wilson */ 125160e94557SChris Wilson ret = i915_vma_sync(vma); 125260e94557SChris Wilson if (ret) 125360e94557SChris Wilson return ret; 125460e94557SChris Wilson 1255b42fe9caSJoonas Lahtinen if (!drm_mm_node_allocated(&vma->node)) 12563365e226SChris Wilson return 0; 1257b42fe9caSJoonas Lahtinen 125860e94557SChris Wilson GEM_BUG_ON(i915_vma_is_pinned(vma)); 125960e94557SChris Wilson GEM_BUG_ON(i915_vma_is_active(vma)); 126060e94557SChris Wilson 1261b42fe9caSJoonas Lahtinen if (i915_vma_is_map_and_fenceable(vma)) { 12629657aaa2SChris Wilson /* Force a pagefault for domain tracking on next user access */ 12639657aaa2SChris Wilson i915_vma_revoke_mmap(vma); 12649657aaa2SChris Wilson 12657125397bSChris Wilson /* 12667125397bSChris Wilson * Check that we have flushed all writes through the GGTT 12677125397bSChris Wilson * before the unbind; otherwise, due to the non-strict nature of 12687125397bSChris Wilson * those indirect writes, they may end up referencing the GGTT PTE 12697125397bSChris Wilson * after the unbind. 12705424f5d7SChris Wilson * 12715424f5d7SChris Wilson * Note that we may be concurrently poking at the GGTT_WRITE 12725424f5d7SChris Wilson * bit from set-domain, as we mark all GGTT vma associated 12735424f5d7SChris Wilson * with an object. We know this is for another vma, as we 12745424f5d7SChris Wilson * are currently unbinding this one -- so if this vma will be 12755424f5d7SChris Wilson * reused, it will be refaulted and have its dirty bit set 12765424f5d7SChris Wilson * before the next write.
12777125397bSChris Wilson */ 12787125397bSChris Wilson i915_vma_flush_writes(vma); 12797125397bSChris Wilson 1280b42fe9caSJoonas Lahtinen /* release the fence reg _after_ flushing */ 12810d86ee35SChris Wilson i915_vma_revoke_fence(vma); 1282b42fe9caSJoonas Lahtinen 1283b42fe9caSJoonas Lahtinen __i915_vma_iounmap(vma); 12844dd2fbbfSChris Wilson clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); 1285b42fe9caSJoonas Lahtinen } 1286a65adaf8SChris Wilson GEM_BUG_ON(vma->fence); 1287a65adaf8SChris Wilson GEM_BUG_ON(i915_vma_has_userfault(vma)); 1288b42fe9caSJoonas Lahtinen 12892850748eSChris Wilson if (likely(atomic_read(&vma->vm->open))) { 1290b42fe9caSJoonas Lahtinen trace_i915_vma_unbind(vma); 129193f2cde2SChris Wilson vma->ops->unbind_vma(vma); 1292b42fe9caSJoonas Lahtinen } 12935424f5d7SChris Wilson atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), 12945424f5d7SChris Wilson &vma->flags); 1295b42fe9caSJoonas Lahtinen 1296dde01d94SChris Wilson i915_vma_detach(vma); 12972850748eSChris Wilson vma_unbind_pages(vma); 1298b42fe9caSJoonas Lahtinen 129976f9764cSChris Wilson drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ 1300b42fe9caSJoonas Lahtinen return 0; 1301b42fe9caSJoonas Lahtinen } 1302b42fe9caSJoonas Lahtinen 13032850748eSChris Wilson int i915_vma_unbind(struct i915_vma *vma) 13042850748eSChris Wilson { 13052850748eSChris Wilson struct i915_address_space *vm = vma->vm; 1306c0e60347SChris Wilson intel_wakeref_t wakeref = 0; 13072850748eSChris Wilson int err; 13082850748eSChris Wilson 1309e6ba7648SChris Wilson if (!drm_mm_node_allocated(&vma->node)) 1310e6ba7648SChris Wilson return 0; 1311e6ba7648SChris Wilson 1312d62f416fSChris Wilson /* Optimistic wait before taking the mutex */ 1313d62f416fSChris Wilson err = i915_vma_sync(vma); 1314d62f416fSChris Wilson if (err) 1315d62f416fSChris Wilson goto out_rpm; 1316d62f416fSChris Wilson 1317614654abSChris Wilson if (i915_vma_is_pinned(vma)) { 1318614654abSChris Wilson vma_print_allocator(vma, "is pinned"); 1319614654abSChris Wilson return -EAGAIN; 1320614654abSChris Wilson } 1321614654abSChris Wilson 1322614654abSChris Wilson if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) 1323614654abSChris Wilson /* XXX not always required: nop_clear_range */ 1324614654abSChris Wilson wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); 1325614654abSChris Wilson 1326d0024911SChris Wilson err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); 13272850748eSChris Wilson if (err) 1328d62f416fSChris Wilson goto out_rpm; 13292850748eSChris Wilson 13302850748eSChris Wilson err = __i915_vma_unbind(vma); 13312850748eSChris Wilson mutex_unlock(&vm->mutex); 13322850748eSChris Wilson 1333d62f416fSChris Wilson out_rpm: 1334c0e60347SChris Wilson if (wakeref) 1335c0e60347SChris Wilson intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); 13362850748eSChris Wilson return err; 13372850748eSChris Wilson } 13382850748eSChris Wilson 13391aff1903SChris Wilson struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) 13401aff1903SChris Wilson { 13411aff1903SChris Wilson i915_gem_object_make_unshrinkable(vma->obj); 13421aff1903SChris Wilson return vma; 13431aff1903SChris Wilson } 13441aff1903SChris Wilson 13451aff1903SChris Wilson void i915_vma_make_shrinkable(struct i915_vma *vma) 13461aff1903SChris Wilson { 13471aff1903SChris Wilson i915_gem_object_make_shrinkable(vma->obj); 13481aff1903SChris Wilson } 13491aff1903SChris Wilson 13501aff1903SChris Wilson void i915_vma_make_purgeable(struct i915_vma *vma) 
13511aff1903SChris Wilson { 13521aff1903SChris Wilson i915_gem_object_make_purgeable(vma->obj); 13531aff1903SChris Wilson } 13541aff1903SChris Wilson 1355e3c7a1c5SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1356e3c7a1c5SChris Wilson #include "selftests/i915_vma.c" 1357e3c7a1c5SChris Wilson #endif 135813f1bfd3SChris Wilson 1359103b76eeSChris Wilson static void i915_global_vma_shrink(void) 1360103b76eeSChris Wilson { 1361103b76eeSChris Wilson kmem_cache_shrink(global.slab_vmas); 1362103b76eeSChris Wilson } 1363103b76eeSChris Wilson 1364103b76eeSChris Wilson static void i915_global_vma_exit(void) 1365103b76eeSChris Wilson { 1366103b76eeSChris Wilson kmem_cache_destroy(global.slab_vmas); 1367103b76eeSChris Wilson } 1368103b76eeSChris Wilson 1369103b76eeSChris Wilson static struct i915_global_vma global = { { 1370103b76eeSChris Wilson .shrink = i915_global_vma_shrink, 1371103b76eeSChris Wilson .exit = i915_global_vma_exit, 1372103b76eeSChris Wilson } }; 1373103b76eeSChris Wilson 137413f1bfd3SChris Wilson int __init i915_global_vma_init(void) 137513f1bfd3SChris Wilson { 137613f1bfd3SChris Wilson global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 137713f1bfd3SChris Wilson if (!global.slab_vmas) 137813f1bfd3SChris Wilson return -ENOMEM; 137913f1bfd3SChris Wilson 1380103b76eeSChris Wilson i915_global_register(&global.base); 138113f1bfd3SChris Wilson return 0; 138213f1bfd3SChris Wilson } 1383
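/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * i915_vma_move_to_active() for a GPU write, under the locking rules the
 * function itself asserts (object lock held, vma pinned). The function
 * name example_track_write() is assumed purely for illustration.
 */
#if 0
static int example_track_write(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	assert_object_held(vma->obj);		/* dma_resv lock must be held */
	GEM_BUG_ON(!i915_vma_is_pinned(vma));	/* keep the vma bound for the GPU */

	/* Serialise against the bind and publish rq->fence as an exclusive write */
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		return err;

	return 0;
}
#endif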
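/*
 * Illustrative sketch, not part of the original file: the two unbind
 * entry points seen above. An unlocked caller uses i915_vma_unbind(),
 * which handles the optimistic i915_vma_sync(), the runtime-pm wakeref
 * for GGTT bindings and vm->mutex itself; a caller already holding
 * vm->mutex (e.g. during eviction) uses __i915_vma_unbind() directly.
 * The example_* function names are assumed purely for illustration.
 */
#if 0
static int example_unbind_unlocked(struct i915_vma *vma)
{
	/* Returns -EAGAIN while another user still holds a pin on the vma */
	return i915_vma_unbind(vma);
}

static int example_unbind_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);
	return __i915_vma_unbind(vma);
}
#endif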