/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}
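/*
 * Note: global.slab_vmas above is created and destroyed through the
 * i915_globals machinery (hence the embedded struct i915_global), not
 * by the callers of i915_vma_alloc()/i915_vma_free().
 */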
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}
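/*
 * The active/retire callbacks above tie the vma's lifetime to request
 * activity: while any request is tracked in vma->active, the vma holds
 * an extra reference on itself, taken in __i915_vma_active() and dropped
 * in __i915_vma_retire() once the last request has been retired.
 */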
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
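/*
 * vma_lookup() walks the per-object rb-tree and must be called with
 * obj->vma.lock held; i915_vma_instance() below takes that lock around
 * the lookup.
 */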
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
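/*
 * Illustrative usage sketch (not taken from a real caller; error
 * handling and surrounding locking omitted):
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 */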
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	return err;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
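/*
 * The worker above lets the PTE setup run asynchronously as a
 * dma_fence_work: __vma_bind() executes once all prerequisite fences
 * have signaled, and __vma_release() drops the page pin taken by
 * i915_vma_bind() when it queues the work.
 */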
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
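/*
 * i915_vma_misplaced() tells a prospective pinner whether the current
 * binding satisfies a new set of constraints; if it returns true, the
 * caller is expected to unbind and rebind the vma at a suitable address.
 */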
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
"mappable" : "total", 664b42fe9caSJoonas Lahtinen end); 6652889caa9SChris Wilson return -ENOSPC; 666b42fe9caSJoonas Lahtinen } 667b42fe9caSJoonas Lahtinen 66833dd8899SMatthew Auld color = 0; 6692850748eSChris Wilson if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) 67033dd8899SMatthew Auld color = vma->obj->cache_level; 671fa3f46afSMatthew Auld 672b42fe9caSJoonas Lahtinen if (flags & PIN_OFFSET_FIXED) { 673b42fe9caSJoonas Lahtinen u64 offset = flags & PIN_OFFSET_MASK; 674f51455d4SChris Wilson if (!IS_ALIGNED(offset, alignment) || 6752850748eSChris Wilson range_overflows(offset, size, end)) 6762850748eSChris Wilson return -EINVAL; 677b42fe9caSJoonas Lahtinen 678625d988aSChris Wilson ret = i915_gem_gtt_reserve(vma->vm, &vma->node, 67933dd8899SMatthew Auld size, offset, color, 680625d988aSChris Wilson flags); 681b42fe9caSJoonas Lahtinen if (ret) 6822850748eSChris Wilson return ret; 683b42fe9caSJoonas Lahtinen } else { 6847464284bSMatthew Auld /* 6857464284bSMatthew Auld * We only support huge gtt pages through the 48b PPGTT, 6867464284bSMatthew Auld * however we also don't want to force any alignment for 6877464284bSMatthew Auld * objects which need to be tightly packed into the low 32bits. 6887464284bSMatthew Auld * 6897464284bSMatthew Auld * Note that we assume that GGTT are limited to 4GiB for the 6907464284bSMatthew Auld * forseeable future. See also i915_ggtt_offset(). 6917464284bSMatthew Auld */ 6927464284bSMatthew Auld if (upper_32_bits(end - 1) && 6937464284bSMatthew Auld vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 694855822beSMatthew Auld /* 695855822beSMatthew Auld * We can't mix 64K and 4K PTEs in the same page-table 696855822beSMatthew Auld * (2M block), and so to avoid the ugliness and 697855822beSMatthew Auld * complexity of coloring we opt for just aligning 64K 698855822beSMatthew Auld * objects to 2M. 699855822beSMatthew Auld */ 7007464284bSMatthew Auld u64 page_alignment = 701855822beSMatthew Auld rounddown_pow_of_two(vma->page_sizes.sg | 702855822beSMatthew Auld I915_GTT_PAGE_SIZE_2M); 7037464284bSMatthew Auld 704bef27bdbSChris Wilson /* 705bef27bdbSChris Wilson * Check we don't expand for the limited Global GTT 706bef27bdbSChris Wilson * (mappable aperture is even more precious!). This 707bef27bdbSChris Wilson * also checks that we exclude the aliasing-ppgtt. 
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}
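/*
 * try_qad_pin ("quick and dirty", at a guess) attempts to grab a pin
 * without taking vm->mutex: first a lockless increment of the pin count
 * for an already bound and pinned vma, then, if the vma is bound but
 * unpinned, a re-check under the mutex to close the race against a
 * concurrent unbind.
 */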
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}
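/*
 * vma->pages_count is split in two: the low bits count vma_get_pages()
 * callers, while each binding adds I915_VMA_PAGES_ACTIVE (a high bit
 * plus one low reference), letting vma_unbind_pages() below release
 * everything the bindings were holding in one go.
 */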
static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them within the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex, distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}
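	/*
	 * The vma now occupies a node in the address space (either newly
	 * inserted or reusing a previous binding); fill in the PTEs,
	 * deferring to the async worker if the backend requires it.
	 */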
	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
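/*
 * i915_ggtt_pin() behaves like i915_vma_pin(vma, 0, align,
 * flags | PIN_GLOBAL), except that on -ENOSPC it idles the GT and
 * evicts the GGTT until the pin finally succeeds, rather than
 * reporting the failure to the caller.
 */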
int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&gt->closed_lock, flags);
	list_add(&vma->closed_link, &gt->closed_vma);
	spin_unlock_irqrestore(&gt->closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}
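/*
 * i915_vma_release() is the kref release callback for vma->ref (see
 * __i915_vma_put()); no pins can remain by this point, so any leftover
 * binding is torn down before the vma is returned to the slab.
 */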
void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
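/*
 * Revoking the CPU mmap: zap the PTEs covering this vma's window of the
 * aperture so that the next user access faults and has to reacquire the
 * mapping, giving us a hook for domain tracking and unbinding.
 */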
1154a65adaf8SChris Wilson 			    drm_vma_node_offset_addr(node) + vma_offset,
1155a65adaf8SChris Wilson 			    vma->size,
1156a65adaf8SChris Wilson 			    1);
1157a65adaf8SChris Wilson 
1158a65adaf8SChris Wilson 	i915_vma_unset_userfault(vma);
1159a65adaf8SChris Wilson 	if (!--vma->obj->userfault_count)
1160a65adaf8SChris Wilson 		list_del(&vma->obj->userfault_link);
1161a65adaf8SChris Wilson }
1162a65adaf8SChris Wilson 
11632850748eSChris Wilson int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
11642850748eSChris Wilson {
11652850748eSChris Wilson 	int err;
11662850748eSChris Wilson 
11672850748eSChris Wilson 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
11682850748eSChris Wilson 
11692850748eSChris Wilson 	/* Wait for the vma to be bound before we start! */
117029e6ecf3SChris Wilson 	err = i915_request_await_active(rq, &vma->active, 0);
11712850748eSChris Wilson 	if (err)
11722850748eSChris Wilson 		return err;
11732850748eSChris Wilson 
11742850748eSChris Wilson 	return i915_active_add_request(&vma->active, rq);
11752850748eSChris Wilson }
11762850748eSChris Wilson 
1177e6bb1d7fSChris Wilson int i915_vma_move_to_active(struct i915_vma *vma,
1178e6bb1d7fSChris Wilson 			    struct i915_request *rq,
1179e6bb1d7fSChris Wilson 			    unsigned int flags)
1180e6bb1d7fSChris Wilson {
1181e6bb1d7fSChris Wilson 	struct drm_i915_gem_object *obj = vma->obj;
1182a93615f9SChris Wilson 	int err;
1183e6bb1d7fSChris Wilson 
11846951e589SChris Wilson 	assert_object_held(obj);
1185e6bb1d7fSChris Wilson 
11862850748eSChris Wilson 	err = __i915_vma_move_to_active(vma, rq);
1187a93615f9SChris Wilson 	if (unlikely(err))
1188a93615f9SChris Wilson 		return err;
1189e6bb1d7fSChris Wilson 
1190e6bb1d7fSChris Wilson 	if (flags & EXEC_OBJECT_WRITE) {
1191da42104fSChris Wilson 		struct intel_frontbuffer *front;
1192da42104fSChris Wilson 
1193da42104fSChris Wilson 		front = __intel_frontbuffer_get(obj);
1194da42104fSChris Wilson 		if (unlikely(front)) {
1195da42104fSChris Wilson 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1196da42104fSChris Wilson 				i915_active_add_request(&front->write, rq);
1197da42104fSChris Wilson 			intel_frontbuffer_put(front);
1198da42104fSChris Wilson 		}
1199e6bb1d7fSChris Wilson 
1200829e8defSRodrigo Vivi 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
1201cd2a4eafSChris Wilson 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
1202e6bb1d7fSChris Wilson 		obj->read_domains = 0;
1203cd2a4eafSChris Wilson 	} else {
1204829e8defSRodrigo Vivi 		err = dma_resv_reserve_shared(vma->resv, 1);
1205cd2a4eafSChris Wilson 		if (unlikely(err))
1206cd2a4eafSChris Wilson 			return err;
1207cd2a4eafSChris Wilson 
1208829e8defSRodrigo Vivi 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
1209cd2a4eafSChris Wilson 		obj->write_domain = 0;
1210e6bb1d7fSChris Wilson 	}
121163baf4f3SChris Wilson 
121263baf4f3SChris Wilson 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
121363baf4f3SChris Wilson 		i915_active_add_request(&vma->fence->active, rq);
121463baf4f3SChris Wilson 
1215e6bb1d7fSChris Wilson 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
1216a93615f9SChris Wilson 	obj->mm.dirty = true;
1217e6bb1d7fSChris Wilson 
1218a93615f9SChris Wilson 	GEM_BUG_ON(!i915_vma_is_active(vma));
1219e6bb1d7fSChris Wilson 	return 0;
1220e6bb1d7fSChris Wilson }
1221e6bb1d7fSChris Wilson 
12222850748eSChris Wilson int __i915_vma_unbind(struct i915_vma *vma)
1223b42fe9caSJoonas Lahtinen {
1224b42fe9caSJoonas Lahtinen 	int ret;
1225b42fe9caSJoonas Lahtinen 
12262850748eSChris Wilson 	lockdep_assert_held(&vma->vm->mutex);
1227b42fe9caSJoonas Lahtinen 
122810195b1eSChris Wilson 	if (i915_vma_is_pinned(vma)) {
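		/* still pinned: report who inserted the node (when tracking is enabled) and let the caller retry */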
122910195b1eSChris Wilson 		vma_print_allocator(vma, "is pinned");
1230d3e48352SChris Wilson 		return -EAGAIN;
123110195b1eSChris Wilson 	}
1232b42fe9caSJoonas Lahtinen 
123360e94557SChris Wilson 	/*
123460e94557SChris Wilson 	 * After confirming that no one else is pinning this vma, wait for
123560e94557SChris Wilson 	 * any laggards who may have crept in during the wait (through
123660e94557SChris Wilson 	 * a residual pin skipping the vm->mutex) to complete.
123760e94557SChris Wilson 	 */
123860e94557SChris Wilson 	ret = i915_vma_sync(vma);
123960e94557SChris Wilson 	if (ret)
124060e94557SChris Wilson 		return ret;
124160e94557SChris Wilson 
1242b42fe9caSJoonas Lahtinen 	if (!drm_mm_node_allocated(&vma->node))
12433365e226SChris Wilson 		return 0;
1244b42fe9caSJoonas Lahtinen 
124560e94557SChris Wilson 	GEM_BUG_ON(i915_vma_is_pinned(vma));
124660e94557SChris Wilson 	GEM_BUG_ON(i915_vma_is_active(vma));
124760e94557SChris Wilson 
1248b42fe9caSJoonas Lahtinen 	if (i915_vma_is_map_and_fenceable(vma)) {
12499657aaa2SChris Wilson 		/* Force a pagefault for domain tracking on next user access */
12509657aaa2SChris Wilson 		i915_vma_revoke_mmap(vma);
12519657aaa2SChris Wilson 
12527125397bSChris Wilson 		/*
12537125397bSChris Wilson 		 * Check that we have flushed all writes through the GGTT
12547125397bSChris Wilson 		 * before the unbind; otherwise, due to the non-strict nature
12557125397bSChris Wilson 		 * of those indirect writes, they may end up referencing the
12567125397bSChris Wilson 		 * GGTT PTE after the unbind.
12575424f5d7SChris Wilson 		 *
12585424f5d7SChris Wilson 		 * Note that we may be concurrently poking at the GGTT_WRITE
12595424f5d7SChris Wilson 		 * bit from set-domain, as we mark all GGTT vma associated
12605424f5d7SChris Wilson 		 * with an object. We know this is for another vma, as we
12615424f5d7SChris Wilson 		 * are currently unbinding this one -- so if this vma will be
12625424f5d7SChris Wilson 		 * reused, it will be refaulted and have its dirty bit set
12635424f5d7SChris Wilson 		 * before the next write.
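		 *
		 * (The i915_vma_flush_writes() call below performs that flush
		 * and clears the GGTT_WRITE bit.)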
12647125397bSChris Wilson 		 */
12657125397bSChris Wilson 		i915_vma_flush_writes(vma);
12667125397bSChris Wilson 
1267b42fe9caSJoonas Lahtinen 		/* release the fence reg _after_ flushing */
12680d86ee35SChris Wilson 		i915_vma_revoke_fence(vma);
1269b42fe9caSJoonas Lahtinen 
1270b42fe9caSJoonas Lahtinen 		__i915_vma_iounmap(vma);
12714dd2fbbfSChris Wilson 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1272b42fe9caSJoonas Lahtinen 	}
1273a65adaf8SChris Wilson 	GEM_BUG_ON(vma->fence);
1274a65adaf8SChris Wilson 	GEM_BUG_ON(i915_vma_has_userfault(vma));
1275b42fe9caSJoonas Lahtinen 
12762850748eSChris Wilson 	if (likely(atomic_read(&vma->vm->open))) {
1277b42fe9caSJoonas Lahtinen 		trace_i915_vma_unbind(vma);
127893f2cde2SChris Wilson 		vma->ops->unbind_vma(vma);
1279b42fe9caSJoonas Lahtinen 	}
12805424f5d7SChris Wilson 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
12815424f5d7SChris Wilson 		   &vma->flags);
1282b42fe9caSJoonas Lahtinen 
1283dde01d94SChris Wilson 	i915_vma_detach(vma);
12842850748eSChris Wilson 	vma_unbind_pages(vma);
1285b42fe9caSJoonas Lahtinen 
128676f9764cSChris Wilson 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1287b42fe9caSJoonas Lahtinen 	return 0;
1288b42fe9caSJoonas Lahtinen }
1289b42fe9caSJoonas Lahtinen 
12902850748eSChris Wilson int i915_vma_unbind(struct i915_vma *vma)
12912850748eSChris Wilson {
12922850748eSChris Wilson 	struct i915_address_space *vm = vma->vm;
1293c0e60347SChris Wilson 	intel_wakeref_t wakeref = 0;
12942850748eSChris Wilson 	int err;
12952850748eSChris Wilson 
1296e6ba7648SChris Wilson 	if (!drm_mm_node_allocated(&vma->node))
1297e6ba7648SChris Wilson 		return 0;
1298e6ba7648SChris Wilson 
1299d62f416fSChris Wilson 	/* Optimistic wait before taking the mutex */
1300d62f416fSChris Wilson 	err = i915_vma_sync(vma);
1301d62f416fSChris Wilson 	if (err)
1302d62f416fSChris Wilson 		goto out_rpm;
1303d62f416fSChris Wilson 
1304614654abSChris Wilson 	if (i915_vma_is_pinned(vma)) {
1305614654abSChris Wilson 		vma_print_allocator(vma, "is pinned");
1306614654abSChris Wilson 		return -EAGAIN;
1307614654abSChris Wilson 	}
1308614654abSChris Wilson 
1309614654abSChris Wilson 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1310614654abSChris Wilson 		/* XXX not always required: nop_clear_range */
1311614654abSChris Wilson 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1312614654abSChris Wilson 
1313d0024911SChris Wilson 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
13142850748eSChris Wilson 	if (err)
1315d62f416fSChris Wilson 		goto out_rpm;
13162850748eSChris Wilson 
13172850748eSChris Wilson 	err = __i915_vma_unbind(vma);
13182850748eSChris Wilson 	mutex_unlock(&vm->mutex);
13192850748eSChris Wilson 
1320d62f416fSChris Wilson out_rpm:
1321c0e60347SChris Wilson 	if (wakeref)
1322c0e60347SChris Wilson 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
13232850748eSChris Wilson 	return err;
13242850748eSChris Wilson }
13252850748eSChris Wilson 
13261aff1903SChris Wilson struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
13271aff1903SChris Wilson {
13281aff1903SChris Wilson 	i915_gem_object_make_unshrinkable(vma->obj);
13291aff1903SChris Wilson 	return vma;
13301aff1903SChris Wilson }
13311aff1903SChris Wilson 
13321aff1903SChris Wilson void i915_vma_make_shrinkable(struct i915_vma *vma)
13331aff1903SChris Wilson {
13341aff1903SChris Wilson 	i915_gem_object_make_shrinkable(vma->obj);
13351aff1903SChris Wilson }
13361aff1903SChris Wilson 
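/*
 * i915_vma_make_purgeable - forward the hint to the backing object so the
 * shrinker may discard (rather than swap out) its pages under memory
 * pressure.
 */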
13371aff1903SChris Wilson void i915_vma_make_purgeable(struct i915_vma *vma)
13381aff1903SChris Wilson {
13391aff1903SChris Wilson 	i915_gem_object_make_purgeable(vma->obj);
13401aff1903SChris Wilson }
13411aff1903SChris Wilson 
1342e3c7a1c5SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1343e3c7a1c5SChris Wilson #include "selftests/i915_vma.c"
1344e3c7a1c5SChris Wilson #endif
134513f1bfd3SChris Wilson 
1346103b76eeSChris Wilson static void i915_global_vma_shrink(void)
1347103b76eeSChris Wilson {
1348103b76eeSChris Wilson 	kmem_cache_shrink(global.slab_vmas);
1349103b76eeSChris Wilson }
1350103b76eeSChris Wilson 
1351103b76eeSChris Wilson static void i915_global_vma_exit(void)
1352103b76eeSChris Wilson {
1353103b76eeSChris Wilson 	kmem_cache_destroy(global.slab_vmas);
1354103b76eeSChris Wilson }
1355103b76eeSChris Wilson 
1356103b76eeSChris Wilson static struct i915_global_vma global = { {
1357103b76eeSChris Wilson 	.shrink = i915_global_vma_shrink,
1358103b76eeSChris Wilson 	.exit = i915_global_vma_exit,
1359103b76eeSChris Wilson } };
1360103b76eeSChris Wilson 
136113f1bfd3SChris Wilson int __init i915_global_vma_init(void)
136213f1bfd3SChris Wilson {
136313f1bfd3SChris Wilson 	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
136413f1bfd3SChris Wilson 	if (!global.slab_vmas)
136513f1bfd3SChris Wilson 		return -ENOMEM;
136613f1bfd3SChris Wilson 
1367103b76eeSChris Wilson 	i915_global_register(&global.base);
136813f1bfd3SChris Wilson 	return 0;
136913f1bfd3SChris Wilson }
1370
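/*
 * Illustrative usage sketch (not part of the driver source): the slab above
 * backs every i915_vma allocation. A typical lifecycle, using the helpers
 * declared in i915_vma.h, looks roughly like this -- obj and ggtt are
 * placeholders for a GEM object and the global GTT:
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use vma->node.start; track GPU work against the vma with
 *	... i915_vma_move_to_active(vma, rq, flags) under the object lock
 *
 *	i915_vma_unpin(vma);
 *	err = i915_vma_unbind(vma);
 */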