/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static struct i915_global_vma {
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long entries[12];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	depot_fetch_stack(vma->node.stack, &trace);
	snprint_stack_trace(buf, sizeof(buf), &trace, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	spin_lock(&i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
	spin_unlock(&i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */
}

static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	obj_bump_mru(obj);

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

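/*
 * Illustrative sketch (not part of the driver): a typical caller looks up
 * the singleton VMA and then pins it, roughly as other users of
 * i915_vma_instance() do. Error handling is abbreviated and the address
 * space chosen here (the GGTT) is only an example.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use i915_ggtt_offset(vma) ...
 *
 *	i915_vma_unpin(vma);
 */
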
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

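/*
 * Worked example of the flag translation above (illustrative only):
 * binding with PIN_USER a vma that already carries I915_VMA_LOCAL_BIND
 * leaves bind_flags == 0, so i915_vma_bind() returns early without touching
 * the PTEs; passing PIN_UPDATE instead keeps the existing bind flags and
 * rewrites the entries, which is how a caller refreshes a mapping in place.
 */
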
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	__i915_gem_object_release_unless_active(obj);
}

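/*
 * Illustrative usage sketch (not part of the driver): map a GGTT-bound vma
 * for CPU access through the aperture and release it again. Names follow
 * the functions above; the locking and wakeref handling of a real caller
 * is omitted.
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	writel(value, ptr + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 */
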
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

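/*
 * Worked example of the rule above (illustrative): with color_adjust set,
 * a vma inserted with a snooped cache level that sits directly adjacent to
 * a node inserted with an unsnooped level is reported as invalid unless a
 * hole separates the two; i915_gem_valid_gtt_space() only returns true when
 * every differently-coloured neighbour is isolated by a hole on the
 * relevant side.
 */
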
static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
"mappable" : "total", 600b42fe9caSJoonas Lahtinen end); 6012889caa9SChris Wilson return -ENOSPC; 602b42fe9caSJoonas Lahtinen } 603b42fe9caSJoonas Lahtinen 604520ea7c5SChris Wilson if (vma->obj) { 605520ea7c5SChris Wilson ret = i915_gem_object_pin_pages(vma->obj); 606b42fe9caSJoonas Lahtinen if (ret) 607b42fe9caSJoonas Lahtinen return ret; 608b42fe9caSJoonas Lahtinen 609520ea7c5SChris Wilson cache_level = vma->obj->cache_level; 610520ea7c5SChris Wilson } else { 611520ea7c5SChris Wilson cache_level = 0; 612520ea7c5SChris Wilson } 613520ea7c5SChris Wilson 614fa3f46afSMatthew Auld GEM_BUG_ON(vma->pages); 615fa3f46afSMatthew Auld 61693f2cde2SChris Wilson ret = vma->ops->set_pages(vma); 617fa3f46afSMatthew Auld if (ret) 618fa3f46afSMatthew Auld goto err_unpin; 619fa3f46afSMatthew Auld 620b42fe9caSJoonas Lahtinen if (flags & PIN_OFFSET_FIXED) { 621b42fe9caSJoonas Lahtinen u64 offset = flags & PIN_OFFSET_MASK; 622f51455d4SChris Wilson if (!IS_ALIGNED(offset, alignment) || 623e8f9ae9bSChris Wilson range_overflows(offset, size, end)) { 624b42fe9caSJoonas Lahtinen ret = -EINVAL; 625fa3f46afSMatthew Auld goto err_clear; 626b42fe9caSJoonas Lahtinen } 627b42fe9caSJoonas Lahtinen 628625d988aSChris Wilson ret = i915_gem_gtt_reserve(vma->vm, &vma->node, 629520ea7c5SChris Wilson size, offset, cache_level, 630625d988aSChris Wilson flags); 631b42fe9caSJoonas Lahtinen if (ret) 632fa3f46afSMatthew Auld goto err_clear; 633b42fe9caSJoonas Lahtinen } else { 6347464284bSMatthew Auld /* 6357464284bSMatthew Auld * We only support huge gtt pages through the 48b PPGTT, 6367464284bSMatthew Auld * however we also don't want to force any alignment for 6377464284bSMatthew Auld * objects which need to be tightly packed into the low 32bits. 6387464284bSMatthew Auld * 6397464284bSMatthew Auld * Note that we assume that GGTT are limited to 4GiB for the 6407464284bSMatthew Auld * forseeable future. See also i915_ggtt_offset(). 6417464284bSMatthew Auld */ 6427464284bSMatthew Auld if (upper_32_bits(end - 1) && 6437464284bSMatthew Auld vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { 644855822beSMatthew Auld /* 645855822beSMatthew Auld * We can't mix 64K and 4K PTEs in the same page-table 646855822beSMatthew Auld * (2M block), and so to avoid the ugliness and 647855822beSMatthew Auld * complexity of coloring we opt for just aligning 64K 648855822beSMatthew Auld * objects to 2M. 649855822beSMatthew Auld */ 6507464284bSMatthew Auld u64 page_alignment = 651855822beSMatthew Auld rounddown_pow_of_two(vma->page_sizes.sg | 652855822beSMatthew Auld I915_GTT_PAGE_SIZE_2M); 6537464284bSMatthew Auld 654bef27bdbSChris Wilson /* 655bef27bdbSChris Wilson * Check we don't expand for the limited Global GTT 656bef27bdbSChris Wilson * (mappable aperture is even more precious!). This 657bef27bdbSChris Wilson * also checks that we exclude the aliasing-ppgtt. 
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&dev_priv->mm.obj_lock);
		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&dev_priv->mm.obj_lock);

		assert_bind_count(obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

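/*
 * Illustrative summary of the placement flags handled above (not an
 * exhaustive list): PIN_OFFSET_FIXED carries the requested offset in
 * PIN_OFFSET_MASK and reserves exactly that range, PIN_MAPPABLE clamps the
 * search to the CPU-visible aperture, and PIN_ZONE_4G keeps the node below
 * 4GiB. A hypothetical fixed-offset pin would look like:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 * where offset must already satisfy the alignment checked above.
 */
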
static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

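/*
 * Pin bookkeeping, summarised for readability (descriptive note, added for
 * clarity): the i915_vma_pin() wrapper in i915_vma.h takes the pin reference
 * in the low bits of vma->flags before calling in here, so the error paths
 * below only undo what this invocation added; err_remove tears down an
 * insertion made by this call, while err_unpin merely drops that pin
 * reference again.
 */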
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (vma->flags & I915_VMA_CLOSED) {
		vma->flags &= ~I915_VMA_CLOSED;
		list_del(&vma->closed_link);
	}
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_closed(vma))
		list_del(&vma->closed_link);

	WARN_ON(i915_vma_unbind(vma));
	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		GEM_BUG_ON(!i915_vma_is_closed(vma));
		i915_vma_destroy(vma);
	}

	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!vma->active.count)
		obj->active_count++;

	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
		if (!vma->active.count)
			obj->active_count--;
		return -ENOMEM;
	}

	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}

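/*
 * Illustrative sketch (not part of the driver): a submission path marks each
 * vma it uses as active against the request before the request is emitted,
 * e.g. for a batch that writes the object:
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 *
 * which keeps the vma (and its object) alive until the request is retired
 * and publishes the request fence via export_fence() above.
 */
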
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	return 0;
}

void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}