/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
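/*
 * Example: a minimal sketch of how a driver might allocate and initialize a
 * shmem-backed GEM object. The foo_* names, the foo_gem_object wrapper and
 * the foo_gem_funcs ops table are hypothetical, not part of this file:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
 *						     size_t size)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		size = PAGE_ALIGN(size);
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		fobj->base.funcs = &foo_gem_funcs;
 *
 *		ret = drm_gem_object_init(dev, &fobj->base, size);
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return fobj;
 *	}
 */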
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}
/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
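/*
 * Example: a minimal sketch of a &drm_driver.dumb_create implementation that
 * publishes an object to userspace. foo_gem_create() is the hypothetical
 * allocator sketched near drm_gem_object_init() above:
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		fobj = foo_gem_create(dev, args->size);
 *		if (IS_ERR(fobj))
 *			return PTR_ERR(fobj);
 *
 *		// Must be last: this publishes the object to userspace.
 *		ret = drm_gem_handle_create(file_priv, &fobj->base,
 *					    &args->handle);
 *
 *		// The handle now holds a reference; drop the creation ref.
 *		drm_gem_object_put(&fobj->base);
 *
 *		return ret;
 *	}
 */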
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
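/*
 * Example: a minimal sketch of a driver pin/unpin pair built on
 * drm_gem_get_pages()/drm_gem_put_pages(). The foo_gem_object type and its
 * pages field are hypothetical; real drivers also need locking and usually
 * a pin count around this:
 *
 *	static int foo_gem_pin_pages(struct foo_gem_object *fobj)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&fobj->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		fobj->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_gem_unpin_pages(struct foo_gem_object *fobj)
 *	{
 *		drm_gem_put_pages(&fobj->base, fobj->pages, true, true);
 *		fobj->pages = NULL;
 *	}
 */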
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, or a negative error
 * code such as -ERESTARTSYS if interrupted.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
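/*
 * Example: a minimal sketch of a driver wait-for-idle ioctl built on
 * drm_gem_dma_resv_wait(). The foo_gem_wait args structure with its handle
 * and timeout_us fields is hypothetical:
 *
 *	static int foo_ioctl_gem_wait(struct drm_device *dev, void *data,
 *				      struct drm_file *file_priv)
 *	{
 *		struct foo_gem_wait *args = data;
 *		long ret;
 *
 *		ret = drm_gem_dma_resv_wait(file_priv, args->handle, true,
 *					    usecs_to_jiffies(args->timeout_us));
 *
 *		return ret;	// 0 on success, -ETIME on timeout.
 *	}
 */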
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}
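/*
 * Example: a minimal userspace sketch of sharing a buffer via flink names,
 * using the uapi structs from <drm/drm.h> (error handling omitted). Note
 * that flink names are global and guessable, so dma-buf file descriptors
 * are generally the preferred sharing mechanism today:
 *
 *	// Exporting process: turn a local handle into a global name.
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// flink.name can now be passed to another process.
 *
 *	// Importing process: turn the name back into a local handle.
 *	struct drm_gem_open open_arg = { .name = flink.name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	// open_arg.handle and open_arg.size are now valid.
 */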
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
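/*
 * Example: a minimal sketch of a driver vm_ops table built on
 * drm_gem_vm_open() and drm_gem_vm_close(), with a hypothetical
 * foo_gem_fault() fault handler. Drivers point
 * &drm_gem_object_funcs.vm_ops at such a table so that drm_gem_mmap_obj()
 * installs it on the VMA:
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */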
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
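/*
 * Example: a minimal sketch of a &dma_buf_ops.mmap implementation built on
 * drm_gem_mmap_obj(). The caller is trusted here because it already holds
 * the dma-buf file descriptor; foo_gem_dmabuf_mmap() is hypothetical and
 * real drivers may need to adjust vma->vm_pgoff first:
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */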
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
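/*
 * Example: a minimal sketch of CPU access through drm_gem_vmap() and
 * drm_gem_vunmap(), using struct iosys_map so the same code works for both
 * system-memory and I/O-memory backed objects. The data/len variables are
 * placeholders, and any locking requirements depend on the driver:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	// Copy via the map wrapper rather than dereferencing map.vaddr
 *	// directly, since the mapping may be in I/O memory.
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *
 *	drm_gem_vunmap(obj, &map);
 */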
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
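/*
 * Example: a minimal sketch of a job-submission path using the two helpers
 * above. The foo_job structure (with its bos array, bo_count, done_fence)
 * and foo_queue_job() are hypothetical:
 *
 *	static int foo_submit_job(struct foo_job *job)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int i, ret;
 *
 *		ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// Reserve fence slots while all reservations are held.
 *		for (i = 0; i < job->bo_count; i++) {
 *			ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
 *			if (ret)
 *				goto unlock;
 *		}
 *
 *		// Queue the job and publish its completion fence.
 *		foo_queue_job(job);
 *		for (i = 0; i < job->bo_count; i++)
 *			dma_resv_add_fence(job->bos[i]->resv, job->done_fence,
 *					   DMA_RESV_USAGE_WRITE);
 *
 *	unlock:
 *		drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 *		return ret;
 *	}
 */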
/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

static void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * moves the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock.
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held).
		 */
		if (!dma_resv_trylock(obj->resv))
			goto tail;

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU.
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU.
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
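/*
 * Example: a minimal sketch of wiring drm_gem_lru_scan() into a shrinker.
 * The foo_device structure, its lru and shrinker members, to_foo_obj() and
 * foo_gem_purge() are hypothetical:
 *
 *	static bool foo_gem_shrink(struct drm_gem_object *obj)
 *	{
 *		// Called with obj->resv held; drop backing pages and move
 *		// the object out of the scanned LRU on success.
 *		return foo_gem_purge(to_foo_obj(obj));
 *	}
 *
 *	static unsigned long
 *	foo_shrinker_scan(struct shrinker *shrinker,
 *			  struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = container_of(shrinker,
 *						      struct foo_device,
 *						      shrinker);
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&foo->lru, sc->nr_to_scan,
 *					 foo_gem_shrink);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */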