/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>

#include "display/intel_frontbuffer.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct i915_global_object {
	struct i915_global base;
	struct kmem_cache *slab_objects;
} global;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	kmem_cache_free(global.slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(to_i915(obj->base.dev)))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
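
/*
 * A rough usage sketch (not lifted from any single caller): backing-store
 * implementations typically pick the cache level at creation time based on
 * whether the platform has an LLC, e.g.
 *
 *	i915_gem_object_set_cache_coherency(obj, HAS_LLC(i915) ?
 *					    I915_CACHE_LLC : I915_CACHE_NONE);
 */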

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break up long lock hold times and carefully continue from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	dma_resv_fini(&obj->base._resv);
	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if it is known not to be in use. */

	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		trace_i915_gem_object_destroy(obj);

		if (!list_empty(&obj->vma.list)) {
			struct i915_vma *vma;

			/*
			 * Note that the vma keeps an object reference while
			 * it is active, so it *should* not sleep while we
			 * destroy it. Our debug code, however, insists that
			 * it *might*. For the moment, play along.
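			 * Hence the loop below drops obj->vma.lock around
			 * each __i915_vma_put() and reacquires it before
			 * looking at the next vma on the list.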
			 */
			spin_lock(&obj->vma.lock);
			while ((vma = list_first_entry_or_null(&obj->vma.list,
							       struct i915_vma,
							       obj_link))) {
				GEM_BUG_ON(vma->obj != obj);
				spin_unlock(&obj->vma.lock);

				__i915_vma_put(vma);

				spin_lock(&obj->vma.lock);
			}
			spin_unlock(&obj->vma.lock);
		}

		__i915_gem_object_free_mmaps(obj);

		GEM_BUG_ON(!list_empty(&obj->lut_list));

		atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));
		bitmap_free(obj->bit_17);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		drm_gem_free_mmap_offset(&obj->base);

		if (obj->ops->release)
			obj->ops->release(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * This serializes freeing with the shrinker. Since the free
	 * is delayed, first by RCU then by the workqueue, we want the
	 * shrinker to be able to free pages of unreferenced objects,
	 * or else we may oom whilst there are plenty of deferred
	 * freed objects.
	 */
	i915_gem_object_make_unshrinkable(obj);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we cannot do that directly from the RCU callback (which may
	 * be a softirq context), but must instead defer that work onto a
	 * worker. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
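	 * (llist_add() reports whether the list was empty beforehand, so the
	 * worker is queued only once per batch of deferred frees; subsequent
	 * objects simply pile onto the already-scheduled list.)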
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region must not
 * cross a page boundary. The caller must ensure that @obj pages are pinned
 * and that @obj is synchronised with respect to any related writes.
 *
 * Returns 0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
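 *
 * A minimal usage sketch (assuming the caller has already pinned the backing
 * pages, e.g. with i915_gem_object_pin_pages(), and has synchronised against
 * any writers):
 *
 *	u32 value;
 *	int err;
 *
 *	err = i915_gem_object_read_from_page(obj, 0, &value, sizeof(value));
 *	if (err)
 *		return err;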
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

static void i915_global_objects_shrink(void)
{
	kmem_cache_shrink(global.slab_objects);
}

static void i915_global_objects_exit(void)
{
	kmem_cache_destroy(global.slab_objects);
}

static struct i915_global_object global = { {
	.shrink = i915_global_objects_shrink,
	.exit = i915_global_objects_exit,
} };

int __init i915_global_objects_init(void)
{
	global.slab_objects =
		KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!global.slab_objects)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif