1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <linux/types.h> 29 #include <linux/slab.h> 30 #include <linux/mm.h> 31 #include <linux/uaccess.h> 32 #include <linux/fs.h> 33 #include <linux/file.h> 34 #include <linux/module.h> 35 #include <linux/mman.h> 36 #include <linux/pagemap.h> 37 #include <linux/shmem_fs.h> 38 #include <linux/dma-buf.h> 39 #include <linux/mem_encrypt.h> 40 #include <linux/pagevec.h> 41 #include <drm/drmP.h> 42 #include <drm/drm_vma_manager.h> 43 #include <drm/drm_gem.h> 44 #include <drm/drm_print.h> 45 #include "drm_internal.h" 46 47 /** @file drm_gem.c 48 * 49 * This file provides some of the base ioctls and library routines for 50 * the graphics memory manager implemented by each device driver. 51 * 52 * Because various devices have different requirements in terms of 53 * synchronization and migration strategies, implementing that is left up to 54 * the driver, and all that the general API provides should be generic -- 55 * allocating objects, reading/writing data with the cpu, freeing objects. 56 * Even there, platform-dependent optimizations for reading/writing data with 57 * the CPU mean we'll likely hook those out to driver-specific calls. However, 58 * the DRI2 implementation wants to have at least allocate/mmap be generic. 59 * 60 * The goal was to have swap-backed object allocation managed through 61 * struct file. However, file descriptors as handles to a struct file have 62 * two major failings: 63 * - Process limits prevent more than 1024 or so being used at a time by 64 * default. 65 * - Inability to allocate high fds will aggravate the X Server's select() 66 * handling, and likely that of many GL client applications as well. 67 * 68 * This led to a plan of using our own integer IDs (called handles, following 69 * DRM terminology) to mimic fds, and implement the fd syscalls we need as 70 * ioctls. The objects themselves will still include the struct file so 71 * that we can transition to fds if the required kernel infrastructure shows 72 * up at a later date, and as our interface with shmfs for memory allocation. 73 */ 74 75 /** 76 * drm_gem_init - Initialize the GEM device fields 77 * @dev: drm_devic structure to initialize 78 */ 79 int 80 drm_gem_init(struct drm_device *dev) 81 { 82 struct drm_vma_offset_manager *vma_offset_manager; 83 84 mutex_init(&dev->object_name_lock); 85 idr_init_base(&dev->object_name_idr, 1); 86 87 vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL); 88 if (!vma_offset_manager) { 89 DRM_ERROR("out of memory\n"); 90 return -ENOMEM; 91 } 92 93 dev->vma_offset_manager = vma_offset_manager; 94 drm_vma_offset_manager_init(vma_offset_manager, 95 DRM_FILE_PAGE_OFFSET_START, 96 DRM_FILE_PAGE_OFFSET_SIZE); 97 98 return 0; 99 } 100 101 void 102 drm_gem_destroy(struct drm_device *dev) 103 { 104 105 drm_vma_offset_manager_destroy(dev->vma_offset_manager); 106 kfree(dev->vma_offset_manager); 107 dev->vma_offset_manager = NULL; 108 } 109 110 /** 111 * drm_gem_object_init - initialize an allocated shmem-backed GEM object 112 * @dev: drm_device the object should be initialized for 113 * @obj: drm_gem_object to initialize 114 * @size: object size 115 * 116 * Initialize an already allocated GEM object of the specified size with 117 * shmfs backing store. 118 */ 119 int drm_gem_object_init(struct drm_device *dev, 120 struct drm_gem_object *obj, size_t size) 121 { 122 struct file *filp; 123 124 drm_gem_private_object_init(dev, obj, size); 125 126 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 127 if (IS_ERR(filp)) 128 return PTR_ERR(filp); 129 130 obj->filp = filp; 131 132 return 0; 133 } 134 EXPORT_SYMBOL(drm_gem_object_init); 135 136 /** 137 * drm_gem_private_object_init - initialize an allocated private GEM object 138 * @dev: drm_device the object should be initialized for 139 * @obj: drm_gem_object to initialize 140 * @size: object size 141 * 142 * Initialize an already allocated GEM object of the specified size with 143 * no GEM provided backing store. Instead the caller is responsible for 144 * backing the object and handling it. 145 */ 146 void drm_gem_private_object_init(struct drm_device *dev, 147 struct drm_gem_object *obj, size_t size) 148 { 149 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 150 151 obj->dev = dev; 152 obj->filp = NULL; 153 154 kref_init(&obj->refcount); 155 obj->handle_count = 0; 156 obj->size = size; 157 reservation_object_init(&obj->_resv); 158 if (!obj->resv) 159 obj->resv = &obj->_resv; 160 161 drm_vma_node_reset(&obj->vma_node); 162 } 163 EXPORT_SYMBOL(drm_gem_private_object_init); 164 165 static void 166 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) 167 { 168 /* 169 * Note: obj->dma_buf can't disappear as long as we still hold a 170 * handle reference in obj->handle_count. 171 */ 172 mutex_lock(&filp->prime.lock); 173 if (obj->dma_buf) { 174 drm_prime_remove_buf_handle_locked(&filp->prime, 175 obj->dma_buf); 176 } 177 mutex_unlock(&filp->prime.lock); 178 } 179 180 /** 181 * drm_gem_object_handle_free - release resources bound to userspace handles 182 * @obj: GEM object to clean up. 183 * 184 * Called after the last handle to the object has been closed 185 * 186 * Removes any name for the object. Note that this must be 187 * called before drm_gem_object_free or we'll be touching 188 * freed memory 189 */ 190 static void drm_gem_object_handle_free(struct drm_gem_object *obj) 191 { 192 struct drm_device *dev = obj->dev; 193 194 /* Remove any name for this object */ 195 if (obj->name) { 196 idr_remove(&dev->object_name_idr, obj->name); 197 obj->name = 0; 198 } 199 } 200 201 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) 202 { 203 /* Unbreak the reference cycle if we have an exported dma_buf. */ 204 if (obj->dma_buf) { 205 dma_buf_put(obj->dma_buf); 206 obj->dma_buf = NULL; 207 } 208 } 209 210 static void 211 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) 212 { 213 struct drm_device *dev = obj->dev; 214 bool final = false; 215 216 if (WARN_ON(obj->handle_count == 0)) 217 return; 218 219 /* 220 * Must bump handle count first as this may be the last 221 * ref, in which case the object would disappear before we 222 * checked for a name 223 */ 224 225 mutex_lock(&dev->object_name_lock); 226 if (--obj->handle_count == 0) { 227 drm_gem_object_handle_free(obj); 228 drm_gem_object_exported_dma_buf_free(obj); 229 final = true; 230 } 231 mutex_unlock(&dev->object_name_lock); 232 233 if (final) 234 drm_gem_object_put_unlocked(obj); 235 } 236 237 /* 238 * Called at device or object close to release the file's 239 * handle references on objects. 240 */ 241 static int 242 drm_gem_object_release_handle(int id, void *ptr, void *data) 243 { 244 struct drm_file *file_priv = data; 245 struct drm_gem_object *obj = ptr; 246 struct drm_device *dev = obj->dev; 247 248 if (obj->funcs && obj->funcs->close) 249 obj->funcs->close(obj, file_priv); 250 else if (dev->driver->gem_close_object) 251 dev->driver->gem_close_object(obj, file_priv); 252 253 if (drm_core_check_feature(dev, DRIVER_PRIME)) 254 drm_gem_remove_prime_handles(obj, file_priv); 255 drm_vma_node_revoke(&obj->vma_node, file_priv); 256 257 drm_gem_object_handle_put_unlocked(obj); 258 259 return 0; 260 } 261 262 /** 263 * drm_gem_handle_delete - deletes the given file-private handle 264 * @filp: drm file-private structure to use for the handle look up 265 * @handle: userspace handle to delete 266 * 267 * Removes the GEM handle from the @filp lookup table which has been added with 268 * drm_gem_handle_create(). If this is the last handle also cleans up linked 269 * resources like GEM names. 270 */ 271 int 272 drm_gem_handle_delete(struct drm_file *filp, u32 handle) 273 { 274 struct drm_gem_object *obj; 275 276 spin_lock(&filp->table_lock); 277 278 /* Check if we currently have a reference on the object */ 279 obj = idr_replace(&filp->object_idr, NULL, handle); 280 spin_unlock(&filp->table_lock); 281 if (IS_ERR_OR_NULL(obj)) 282 return -EINVAL; 283 284 /* Release driver's reference and decrement refcount. */ 285 drm_gem_object_release_handle(handle, obj, filp); 286 287 /* And finally make the handle available for future allocations. */ 288 spin_lock(&filp->table_lock); 289 idr_remove(&filp->object_idr, handle); 290 spin_unlock(&filp->table_lock); 291 292 return 0; 293 } 294 EXPORT_SYMBOL(drm_gem_handle_delete); 295 296 /** 297 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object 298 * @file: drm file-private structure containing the gem object 299 * @dev: corresponding drm_device 300 * @handle: gem object handle 301 * @offset: return location for the fake mmap offset 302 * 303 * This implements the &drm_driver.dumb_map_offset kms driver callback for 304 * drivers which use gem to manage their backing storage. 305 * 306 * Returns: 307 * 0 on success or a negative error code on failure. 308 */ 309 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 310 u32 handle, u64 *offset) 311 { 312 struct drm_gem_object *obj; 313 int ret; 314 315 obj = drm_gem_object_lookup(file, handle); 316 if (!obj) 317 return -ENOENT; 318 319 /* Don't allow imported objects to be mapped */ 320 if (obj->import_attach) { 321 ret = -EINVAL; 322 goto out; 323 } 324 325 ret = drm_gem_create_mmap_offset(obj); 326 if (ret) 327 goto out; 328 329 *offset = drm_vma_node_offset_addr(&obj->vma_node); 330 out: 331 drm_gem_object_put_unlocked(obj); 332 333 return ret; 334 } 335 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); 336 337 /** 338 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers 339 * @file: drm file-private structure to remove the dumb handle from 340 * @dev: corresponding drm_device 341 * @handle: the dumb handle to remove 342 * 343 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers 344 * which use gem to manage their backing storage. 345 */ 346 int drm_gem_dumb_destroy(struct drm_file *file, 347 struct drm_device *dev, 348 uint32_t handle) 349 { 350 return drm_gem_handle_delete(file, handle); 351 } 352 EXPORT_SYMBOL(drm_gem_dumb_destroy); 353 354 /** 355 * drm_gem_handle_create_tail - internal functions to create a handle 356 * @file_priv: drm file-private structure to register the handle for 357 * @obj: object to register 358 * @handlep: pointer to return the created handle to the caller 359 * 360 * This expects the &drm_device.object_name_lock to be held already and will 361 * drop it before returning. Used to avoid races in establishing new handles 362 * when importing an object from either an flink name or a dma-buf. 363 * 364 * Handles must be release again through drm_gem_handle_delete(). This is done 365 * when userspace closes @file_priv for all attached handles, or through the 366 * GEM_CLOSE ioctl for individual handles. 367 */ 368 int 369 drm_gem_handle_create_tail(struct drm_file *file_priv, 370 struct drm_gem_object *obj, 371 u32 *handlep) 372 { 373 struct drm_device *dev = obj->dev; 374 u32 handle; 375 int ret; 376 377 WARN_ON(!mutex_is_locked(&dev->object_name_lock)); 378 if (obj->handle_count++ == 0) 379 drm_gem_object_get(obj); 380 381 /* 382 * Get the user-visible handle using idr. Preload and perform 383 * allocation under our spinlock. 384 */ 385 idr_preload(GFP_KERNEL); 386 spin_lock(&file_priv->table_lock); 387 388 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); 389 390 spin_unlock(&file_priv->table_lock); 391 idr_preload_end(); 392 393 mutex_unlock(&dev->object_name_lock); 394 if (ret < 0) 395 goto err_unref; 396 397 handle = ret; 398 399 ret = drm_vma_node_allow(&obj->vma_node, file_priv); 400 if (ret) 401 goto err_remove; 402 403 if (obj->funcs && obj->funcs->open) { 404 ret = obj->funcs->open(obj, file_priv); 405 if (ret) 406 goto err_revoke; 407 } else if (dev->driver->gem_open_object) { 408 ret = dev->driver->gem_open_object(obj, file_priv); 409 if (ret) 410 goto err_revoke; 411 } 412 413 *handlep = handle; 414 return 0; 415 416 err_revoke: 417 drm_vma_node_revoke(&obj->vma_node, file_priv); 418 err_remove: 419 spin_lock(&file_priv->table_lock); 420 idr_remove(&file_priv->object_idr, handle); 421 spin_unlock(&file_priv->table_lock); 422 err_unref: 423 drm_gem_object_handle_put_unlocked(obj); 424 return ret; 425 } 426 427 /** 428 * drm_gem_handle_create - create a gem handle for an object 429 * @file_priv: drm file-private structure to register the handle for 430 * @obj: object to register 431 * @handlep: pionter to return the created handle to the caller 432 * 433 * Create a handle for this object. This adds a handle reference to the object, 434 * which includes a regular reference count. Callers will likely want to 435 * dereference the object afterwards. 436 * 437 * Since this publishes @obj to userspace it must be fully set up by this point, 438 * drivers must call this last in their buffer object creation callbacks. 439 */ 440 int drm_gem_handle_create(struct drm_file *file_priv, 441 struct drm_gem_object *obj, 442 u32 *handlep) 443 { 444 mutex_lock(&obj->dev->object_name_lock); 445 446 return drm_gem_handle_create_tail(file_priv, obj, handlep); 447 } 448 EXPORT_SYMBOL(drm_gem_handle_create); 449 450 451 /** 452 * drm_gem_free_mmap_offset - release a fake mmap offset for an object 453 * @obj: obj in question 454 * 455 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset(). 456 * 457 * Note that drm_gem_object_release() already calls this function, so drivers 458 * don't have to take care of releasing the mmap offset themselves when freeing 459 * the GEM object. 460 */ 461 void 462 drm_gem_free_mmap_offset(struct drm_gem_object *obj) 463 { 464 struct drm_device *dev = obj->dev; 465 466 drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node); 467 } 468 EXPORT_SYMBOL(drm_gem_free_mmap_offset); 469 470 /** 471 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object 472 * @obj: obj in question 473 * @size: the virtual size 474 * 475 * GEM memory mapping works by handing back to userspace a fake mmap offset 476 * it can use in a subsequent mmap(2) call. The DRM core code then looks 477 * up the object based on the offset and sets up the various memory mapping 478 * structures. 479 * 480 * This routine allocates and attaches a fake offset for @obj, in cases where 481 * the virtual size differs from the physical size (ie. &drm_gem_object.size). 482 * Otherwise just use drm_gem_create_mmap_offset(). 483 * 484 * This function is idempotent and handles an already allocated mmap offset 485 * transparently. Drivers do not need to check for this case. 486 */ 487 int 488 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) 489 { 490 struct drm_device *dev = obj->dev; 491 492 return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, 493 size / PAGE_SIZE); 494 } 495 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); 496 497 /** 498 * drm_gem_create_mmap_offset - create a fake mmap offset for an object 499 * @obj: obj in question 500 * 501 * GEM memory mapping works by handing back to userspace a fake mmap offset 502 * it can use in a subsequent mmap(2) call. The DRM core code then looks 503 * up the object based on the offset and sets up the various memory mapping 504 * structures. 505 * 506 * This routine allocates and attaches a fake offset for @obj. 507 * 508 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release 509 * the fake offset again. 510 */ 511 int drm_gem_create_mmap_offset(struct drm_gem_object *obj) 512 { 513 return drm_gem_create_mmap_offset_size(obj, obj->size); 514 } 515 EXPORT_SYMBOL(drm_gem_create_mmap_offset); 516 517 /* 518 * Move pages to appropriate lru and release the pagevec, decrementing the 519 * ref count of those pages. 520 */ 521 static void drm_gem_check_release_pagevec(struct pagevec *pvec) 522 { 523 check_move_unevictable_pages(pvec); 524 __pagevec_release(pvec); 525 cond_resched(); 526 } 527 528 /** 529 * drm_gem_get_pages - helper to allocate backing pages for a GEM object 530 * from shmem 531 * @obj: obj in question 532 * 533 * This reads the page-array of the shmem-backing storage of the given gem 534 * object. An array of pages is returned. If a page is not allocated or 535 * swapped-out, this will allocate/swap-in the required pages. Note that the 536 * whole object is covered by the page-array and pinned in memory. 537 * 538 * Use drm_gem_put_pages() to release the array and unpin all pages. 539 * 540 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). 541 * If you require other GFP-masks, you have to do those allocations yourself. 542 * 543 * Note that you are not allowed to change gfp-zones during runtime. That is, 544 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as 545 * set during initialization. If you have special zone constraints, set them 546 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care 547 * to keep pages in the required zone during swap-in. 548 */ 549 struct page **drm_gem_get_pages(struct drm_gem_object *obj) 550 { 551 struct address_space *mapping; 552 struct page *p, **pages; 553 struct pagevec pvec; 554 int i, npages; 555 556 /* This is the shared memory object that backs the GEM resource */ 557 mapping = obj->filp->f_mapping; 558 559 /* We already BUG_ON() for non-page-aligned sizes in 560 * drm_gem_object_init(), so we should never hit this unless 561 * driver author is doing something really wrong: 562 */ 563 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); 564 565 npages = obj->size >> PAGE_SHIFT; 566 567 pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 568 if (pages == NULL) 569 return ERR_PTR(-ENOMEM); 570 571 mapping_set_unevictable(mapping); 572 573 for (i = 0; i < npages; i++) { 574 p = shmem_read_mapping_page(mapping, i); 575 if (IS_ERR(p)) 576 goto fail; 577 pages[i] = p; 578 579 /* Make sure shmem keeps __GFP_DMA32 allocated pages in the 580 * correct region during swapin. Note that this requires 581 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) 582 * so shmem can relocate pages during swapin if required. 583 */ 584 BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) && 585 (page_to_pfn(p) >= 0x00100000UL)); 586 } 587 588 return pages; 589 590 fail: 591 mapping_clear_unevictable(mapping); 592 pagevec_init(&pvec); 593 while (i--) { 594 if (!pagevec_add(&pvec, pages[i])) 595 drm_gem_check_release_pagevec(&pvec); 596 } 597 if (pagevec_count(&pvec)) 598 drm_gem_check_release_pagevec(&pvec); 599 600 kvfree(pages); 601 return ERR_CAST(p); 602 } 603 EXPORT_SYMBOL(drm_gem_get_pages); 604 605 /** 606 * drm_gem_put_pages - helper to free backing pages for a GEM object 607 * @obj: obj in question 608 * @pages: pages to free 609 * @dirty: if true, pages will be marked as dirty 610 * @accessed: if true, the pages will be marked as accessed 611 */ 612 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, 613 bool dirty, bool accessed) 614 { 615 int i, npages; 616 struct address_space *mapping; 617 struct pagevec pvec; 618 619 mapping = file_inode(obj->filp)->i_mapping; 620 mapping_clear_unevictable(mapping); 621 622 /* We already BUG_ON() for non-page-aligned sizes in 623 * drm_gem_object_init(), so we should never hit this unless 624 * driver author is doing something really wrong: 625 */ 626 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); 627 628 npages = obj->size >> PAGE_SHIFT; 629 630 pagevec_init(&pvec); 631 for (i = 0; i < npages; i++) { 632 if (dirty) 633 set_page_dirty(pages[i]); 634 635 if (accessed) 636 mark_page_accessed(pages[i]); 637 638 /* Undo the reference we took when populating the table */ 639 if (!pagevec_add(&pvec, pages[i])) 640 drm_gem_check_release_pagevec(&pvec); 641 } 642 if (pagevec_count(&pvec)) 643 drm_gem_check_release_pagevec(&pvec); 644 645 kvfree(pages); 646 } 647 EXPORT_SYMBOL(drm_gem_put_pages); 648 649 static int objects_lookup(struct drm_file *filp, u32 *handle, int count, 650 struct drm_gem_object **objs) 651 { 652 int i, ret = 0; 653 struct drm_gem_object *obj; 654 655 spin_lock(&filp->table_lock); 656 657 for (i = 0; i < count; i++) { 658 /* Check if we currently have a reference on the object */ 659 obj = idr_find(&filp->object_idr, handle[i]); 660 if (!obj) { 661 ret = -ENOENT; 662 break; 663 } 664 drm_gem_object_get(obj); 665 objs[i] = obj; 666 } 667 spin_unlock(&filp->table_lock); 668 669 return ret; 670 } 671 672 /** 673 * drm_gem_objects_lookup - look up GEM objects from an array of handles 674 * @filp: DRM file private date 675 * @bo_handles: user pointer to array of userspace handle 676 * @count: size of handle array 677 * @objs_out: returned pointer to array of drm_gem_object pointers 678 * 679 * Takes an array of userspace handles and returns a newly allocated array of 680 * GEM objects. 681 * 682 * For a single handle lookup, use drm_gem_object_lookup(). 683 * 684 * Returns: 685 * 686 * @objs filled in with GEM object pointers. Returned GEM objects need to be 687 * released with drm_gem_object_put(). -ENOENT is returned on a lookup 688 * failure. 0 is returned on success. 689 * 690 */ 691 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 692 int count, struct drm_gem_object ***objs_out) 693 { 694 int ret; 695 u32 *handles; 696 struct drm_gem_object **objs; 697 698 if (!count) 699 return 0; 700 701 objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), 702 GFP_KERNEL | __GFP_ZERO); 703 if (!objs) 704 return -ENOMEM; 705 706 handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); 707 if (!handles) { 708 ret = -ENOMEM; 709 goto out; 710 } 711 712 if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { 713 ret = -EFAULT; 714 DRM_DEBUG("Failed to copy in GEM handles\n"); 715 goto out; 716 } 717 718 ret = objects_lookup(filp, handles, count, objs); 719 *objs_out = objs; 720 721 out: 722 kvfree(handles); 723 return ret; 724 725 } 726 EXPORT_SYMBOL(drm_gem_objects_lookup); 727 728 /** 729 * drm_gem_object_lookup - look up a GEM object from its handle 730 * @filp: DRM file private date 731 * @handle: userspace handle 732 * 733 * Returns: 734 * 735 * A reference to the object named by the handle if such exists on @filp, NULL 736 * otherwise. 737 * 738 * If looking up an array of handles, use drm_gem_objects_lookup(). 739 */ 740 struct drm_gem_object * 741 drm_gem_object_lookup(struct drm_file *filp, u32 handle) 742 { 743 struct drm_gem_object *obj = NULL; 744 745 objects_lookup(filp, &handle, 1, &obj); 746 return obj; 747 } 748 EXPORT_SYMBOL(drm_gem_object_lookup); 749 750 /** 751 * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects 752 * shared and/or exclusive fences. 753 * @filep: DRM file private date 754 * @handle: userspace handle 755 * @wait_all: if true, wait on all fences, else wait on just exclusive fence 756 * @timeout: timeout value in jiffies or zero to return immediately 757 * 758 * Returns: 759 * 760 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 761 * greater than 0 on success. 762 */ 763 long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 764 bool wait_all, unsigned long timeout) 765 { 766 long ret; 767 struct drm_gem_object *obj; 768 769 obj = drm_gem_object_lookup(filep, handle); 770 if (!obj) { 771 DRM_DEBUG("Failed to look up GEM BO %d\n", handle); 772 return -EINVAL; 773 } 774 775 ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, 776 true, timeout); 777 if (ret == 0) 778 ret = -ETIME; 779 else if (ret > 0) 780 ret = 0; 781 782 drm_gem_object_put_unlocked(obj); 783 784 return ret; 785 } 786 EXPORT_SYMBOL(drm_gem_reservation_object_wait); 787 788 /** 789 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl 790 * @dev: drm_device 791 * @data: ioctl data 792 * @file_priv: drm file-private structure 793 * 794 * Releases the handle to an mm object. 795 */ 796 int 797 drm_gem_close_ioctl(struct drm_device *dev, void *data, 798 struct drm_file *file_priv) 799 { 800 struct drm_gem_close *args = data; 801 int ret; 802 803 if (!drm_core_check_feature(dev, DRIVER_GEM)) 804 return -EOPNOTSUPP; 805 806 ret = drm_gem_handle_delete(file_priv, args->handle); 807 808 return ret; 809 } 810 811 /** 812 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl 813 * @dev: drm_device 814 * @data: ioctl data 815 * @file_priv: drm file-private structure 816 * 817 * Create a global name for an object, returning the name. 818 * 819 * Note that the name does not hold a reference; when the object 820 * is freed, the name goes away. 821 */ 822 int 823 drm_gem_flink_ioctl(struct drm_device *dev, void *data, 824 struct drm_file *file_priv) 825 { 826 struct drm_gem_flink *args = data; 827 struct drm_gem_object *obj; 828 int ret; 829 830 if (!drm_core_check_feature(dev, DRIVER_GEM)) 831 return -EOPNOTSUPP; 832 833 obj = drm_gem_object_lookup(file_priv, args->handle); 834 if (obj == NULL) 835 return -ENOENT; 836 837 mutex_lock(&dev->object_name_lock); 838 /* prevent races with concurrent gem_close. */ 839 if (obj->handle_count == 0) { 840 ret = -ENOENT; 841 goto err; 842 } 843 844 if (!obj->name) { 845 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL); 846 if (ret < 0) 847 goto err; 848 849 obj->name = ret; 850 } 851 852 args->name = (uint64_t) obj->name; 853 ret = 0; 854 855 err: 856 mutex_unlock(&dev->object_name_lock); 857 drm_gem_object_put_unlocked(obj); 858 return ret; 859 } 860 861 /** 862 * drm_gem_open - implementation of the GEM_OPEN ioctl 863 * @dev: drm_device 864 * @data: ioctl data 865 * @file_priv: drm file-private structure 866 * 867 * Open an object using the global name, returning a handle and the size. 868 * 869 * This handle (of course) holds a reference to the object, so the object 870 * will not go away until the handle is deleted. 871 */ 872 int 873 drm_gem_open_ioctl(struct drm_device *dev, void *data, 874 struct drm_file *file_priv) 875 { 876 struct drm_gem_open *args = data; 877 struct drm_gem_object *obj; 878 int ret; 879 u32 handle; 880 881 if (!drm_core_check_feature(dev, DRIVER_GEM)) 882 return -EOPNOTSUPP; 883 884 mutex_lock(&dev->object_name_lock); 885 obj = idr_find(&dev->object_name_idr, (int) args->name); 886 if (obj) { 887 drm_gem_object_get(obj); 888 } else { 889 mutex_unlock(&dev->object_name_lock); 890 return -ENOENT; 891 } 892 893 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ 894 ret = drm_gem_handle_create_tail(file_priv, obj, &handle); 895 drm_gem_object_put_unlocked(obj); 896 if (ret) 897 return ret; 898 899 args->handle = handle; 900 args->size = obj->size; 901 902 return 0; 903 } 904 905 /** 906 * gem_gem_open - initalizes GEM file-private structures at devnode open time 907 * @dev: drm_device which is being opened by userspace 908 * @file_private: drm file-private structure to set up 909 * 910 * Called at device open time, sets up the structure for handling refcounting 911 * of mm objects. 912 */ 913 void 914 drm_gem_open(struct drm_device *dev, struct drm_file *file_private) 915 { 916 idr_init_base(&file_private->object_idr, 1); 917 spin_lock_init(&file_private->table_lock); 918 } 919 920 /** 921 * drm_gem_release - release file-private GEM resources 922 * @dev: drm_device which is being closed by userspace 923 * @file_private: drm file-private structure to clean up 924 * 925 * Called at close time when the filp is going away. 926 * 927 * Releases any remaining references on objects by this filp. 928 */ 929 void 930 drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 931 { 932 idr_for_each(&file_private->object_idr, 933 &drm_gem_object_release_handle, file_private); 934 idr_destroy(&file_private->object_idr); 935 } 936 937 /** 938 * drm_gem_object_release - release GEM buffer object resources 939 * @obj: GEM buffer object 940 * 941 * This releases any structures and resources used by @obj and is the invers of 942 * drm_gem_object_init(). 943 */ 944 void 945 drm_gem_object_release(struct drm_gem_object *obj) 946 { 947 WARN_ON(obj->dma_buf); 948 949 if (obj->filp) 950 fput(obj->filp); 951 952 reservation_object_fini(&obj->_resv); 953 drm_gem_free_mmap_offset(obj); 954 } 955 EXPORT_SYMBOL(drm_gem_object_release); 956 957 /** 958 * drm_gem_object_free - free a GEM object 959 * @kref: kref of the object to free 960 * 961 * Called after the last reference to the object has been lost. 962 * Must be called holding &drm_device.struct_mutex. 963 * 964 * Frees the object 965 */ 966 void 967 drm_gem_object_free(struct kref *kref) 968 { 969 struct drm_gem_object *obj = 970 container_of(kref, struct drm_gem_object, refcount); 971 struct drm_device *dev = obj->dev; 972 973 if (obj->funcs) { 974 obj->funcs->free(obj); 975 } else if (dev->driver->gem_free_object_unlocked) { 976 dev->driver->gem_free_object_unlocked(obj); 977 } else if (dev->driver->gem_free_object) { 978 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 979 980 dev->driver->gem_free_object(obj); 981 } 982 } 983 EXPORT_SYMBOL(drm_gem_object_free); 984 985 /** 986 * drm_gem_object_put_unlocked - drop a GEM buffer object reference 987 * @obj: GEM buffer object 988 * 989 * This releases a reference to @obj. Callers must not hold the 990 * &drm_device.struct_mutex lock when calling this function. 991 * 992 * See also __drm_gem_object_put(). 993 */ 994 void 995 drm_gem_object_put_unlocked(struct drm_gem_object *obj) 996 { 997 struct drm_device *dev; 998 999 if (!obj) 1000 return; 1001 1002 dev = obj->dev; 1003 1004 if (dev->driver->gem_free_object) { 1005 might_lock(&dev->struct_mutex); 1006 if (kref_put_mutex(&obj->refcount, drm_gem_object_free, 1007 &dev->struct_mutex)) 1008 mutex_unlock(&dev->struct_mutex); 1009 } else { 1010 kref_put(&obj->refcount, drm_gem_object_free); 1011 } 1012 } 1013 EXPORT_SYMBOL(drm_gem_object_put_unlocked); 1014 1015 /** 1016 * drm_gem_object_put - release a GEM buffer object reference 1017 * @obj: GEM buffer object 1018 * 1019 * This releases a reference to @obj. Callers must hold the 1020 * &drm_device.struct_mutex lock when calling this function, even when the 1021 * driver doesn't use &drm_device.struct_mutex for anything. 1022 * 1023 * For drivers not encumbered with legacy locking use 1024 * drm_gem_object_put_unlocked() instead. 1025 */ 1026 void 1027 drm_gem_object_put(struct drm_gem_object *obj) 1028 { 1029 if (obj) { 1030 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 1031 1032 kref_put(&obj->refcount, drm_gem_object_free); 1033 } 1034 } 1035 EXPORT_SYMBOL(drm_gem_object_put); 1036 1037 /** 1038 * drm_gem_vm_open - vma->ops->open implementation for GEM 1039 * @vma: VM area structure 1040 * 1041 * This function implements the #vm_operations_struct open() callback for GEM 1042 * drivers. This must be used together with drm_gem_vm_close(). 1043 */ 1044 void drm_gem_vm_open(struct vm_area_struct *vma) 1045 { 1046 struct drm_gem_object *obj = vma->vm_private_data; 1047 1048 drm_gem_object_get(obj); 1049 } 1050 EXPORT_SYMBOL(drm_gem_vm_open); 1051 1052 /** 1053 * drm_gem_vm_close - vma->ops->close implementation for GEM 1054 * @vma: VM area structure 1055 * 1056 * This function implements the #vm_operations_struct close() callback for GEM 1057 * drivers. This must be used together with drm_gem_vm_open(). 1058 */ 1059 void drm_gem_vm_close(struct vm_area_struct *vma) 1060 { 1061 struct drm_gem_object *obj = vma->vm_private_data; 1062 1063 drm_gem_object_put_unlocked(obj); 1064 } 1065 EXPORT_SYMBOL(drm_gem_vm_close); 1066 1067 /** 1068 * drm_gem_mmap_obj - memory map a GEM object 1069 * @obj: the GEM object to map 1070 * @obj_size: the object size to be mapped, in bytes 1071 * @vma: VMA for the area to be mapped 1072 * 1073 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops 1074 * provided by the driver. Depending on their requirements, drivers can either 1075 * provide a fault handler in their gem_vm_ops (in which case any accesses to 1076 * the object will be trapped, to perform migration, GTT binding, surface 1077 * register allocation, or performance monitoring), or mmap the buffer memory 1078 * synchronously after calling drm_gem_mmap_obj. 1079 * 1080 * This function is mainly intended to implement the DMABUF mmap operation, when 1081 * the GEM object is not looked up based on its fake offset. To implement the 1082 * DRM mmap operation, drivers should use the drm_gem_mmap() function. 1083 * 1084 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while 1085 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So 1086 * callers must verify access restrictions before calling this helper. 1087 * 1088 * Return 0 or success or -EINVAL if the object size is smaller than the VMA 1089 * size, or if no gem_vm_ops are provided. 1090 */ 1091 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, 1092 struct vm_area_struct *vma) 1093 { 1094 struct drm_device *dev = obj->dev; 1095 1096 /* Check for valid size. */ 1097 if (obj_size < vma->vm_end - vma->vm_start) 1098 return -EINVAL; 1099 1100 if (obj->funcs && obj->funcs->vm_ops) 1101 vma->vm_ops = obj->funcs->vm_ops; 1102 else if (dev->driver->gem_vm_ops) 1103 vma->vm_ops = dev->driver->gem_vm_ops; 1104 else 1105 return -EINVAL; 1106 1107 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 1108 vma->vm_private_data = obj; 1109 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1110 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); 1111 1112 /* Take a ref for this mapping of the object, so that the fault 1113 * handler can dereference the mmap offset's pointer to the object. 1114 * This reference is cleaned up by the corresponding vm_close 1115 * (which should happen whether the vma was created by this call, or 1116 * by a vm_open due to mremap or partial unmap or whatever). 1117 */ 1118 drm_gem_object_get(obj); 1119 1120 return 0; 1121 } 1122 EXPORT_SYMBOL(drm_gem_mmap_obj); 1123 1124 /** 1125 * drm_gem_mmap - memory map routine for GEM objects 1126 * @filp: DRM file pointer 1127 * @vma: VMA for the area to be mapped 1128 * 1129 * If a driver supports GEM object mapping, mmap calls on the DRM file 1130 * descriptor will end up here. 1131 * 1132 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will 1133 * contain the fake offset we created when the GTT map ioctl was called on 1134 * the object) and map it with a call to drm_gem_mmap_obj(). 1135 * 1136 * If the caller is not granted access to the buffer object, the mmap will fail 1137 * with EACCES. Please see the vma manager for more information. 1138 */ 1139 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 1140 { 1141 struct drm_file *priv = filp->private_data; 1142 struct drm_device *dev = priv->minor->dev; 1143 struct drm_gem_object *obj = NULL; 1144 struct drm_vma_offset_node *node; 1145 int ret; 1146 1147 if (drm_dev_is_unplugged(dev)) 1148 return -ENODEV; 1149 1150 drm_vma_offset_lock_lookup(dev->vma_offset_manager); 1151 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, 1152 vma->vm_pgoff, 1153 vma_pages(vma)); 1154 if (likely(node)) { 1155 obj = container_of(node, struct drm_gem_object, vma_node); 1156 /* 1157 * When the object is being freed, after it hits 0-refcnt it 1158 * proceeds to tear down the object. In the process it will 1159 * attempt to remove the VMA offset and so acquire this 1160 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt 1161 * that matches our range, we know it is in the process of being 1162 * destroyed and will be freed as soon as we release the lock - 1163 * so we have to check for the 0-refcnted object and treat it as 1164 * invalid. 1165 */ 1166 if (!kref_get_unless_zero(&obj->refcount)) 1167 obj = NULL; 1168 } 1169 drm_vma_offset_unlock_lookup(dev->vma_offset_manager); 1170 1171 if (!obj) 1172 return -EINVAL; 1173 1174 if (!drm_vma_node_is_allowed(node, priv)) { 1175 drm_gem_object_put_unlocked(obj); 1176 return -EACCES; 1177 } 1178 1179 if (node->readonly) { 1180 if (vma->vm_flags & VM_WRITE) { 1181 drm_gem_object_put_unlocked(obj); 1182 return -EINVAL; 1183 } 1184 1185 vma->vm_flags &= ~VM_MAYWRITE; 1186 } 1187 1188 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, 1189 vma); 1190 1191 drm_gem_object_put_unlocked(obj); 1192 1193 return ret; 1194 } 1195 EXPORT_SYMBOL(drm_gem_mmap); 1196 1197 void drm_gem_print_info(struct drm_printer *p, unsigned int indent, 1198 const struct drm_gem_object *obj) 1199 { 1200 drm_printf_indent(p, indent, "name=%d\n", obj->name); 1201 drm_printf_indent(p, indent, "refcount=%u\n", 1202 kref_read(&obj->refcount)); 1203 drm_printf_indent(p, indent, "start=%08lx\n", 1204 drm_vma_node_start(&obj->vma_node)); 1205 drm_printf_indent(p, indent, "size=%zu\n", obj->size); 1206 drm_printf_indent(p, indent, "imported=%s\n", 1207 obj->import_attach ? "yes" : "no"); 1208 1209 if (obj->funcs && obj->funcs->print_info) 1210 obj->funcs->print_info(p, indent, obj); 1211 else if (obj->dev->driver->gem_print_info) 1212 obj->dev->driver->gem_print_info(p, indent, obj); 1213 } 1214 1215 /** 1216 * drm_gem_pin - Pin backing buffer in memory 1217 * @obj: GEM object 1218 * 1219 * Make sure the backing buffer is pinned in memory. 1220 * 1221 * Returns: 1222 * 0 on success or a negative error code on failure. 1223 */ 1224 int drm_gem_pin(struct drm_gem_object *obj) 1225 { 1226 if (obj->funcs && obj->funcs->pin) 1227 return obj->funcs->pin(obj); 1228 else if (obj->dev->driver->gem_prime_pin) 1229 return obj->dev->driver->gem_prime_pin(obj); 1230 else 1231 return 0; 1232 } 1233 EXPORT_SYMBOL(drm_gem_pin); 1234 1235 /** 1236 * drm_gem_unpin - Unpin backing buffer from memory 1237 * @obj: GEM object 1238 * 1239 * Relax the requirement that the backing buffer is pinned in memory. 1240 */ 1241 void drm_gem_unpin(struct drm_gem_object *obj) 1242 { 1243 if (obj->funcs && obj->funcs->unpin) 1244 obj->funcs->unpin(obj); 1245 else if (obj->dev->driver->gem_prime_unpin) 1246 obj->dev->driver->gem_prime_unpin(obj); 1247 } 1248 EXPORT_SYMBOL(drm_gem_unpin); 1249 1250 /** 1251 * drm_gem_vmap - Map buffer into kernel virtual address space 1252 * @obj: GEM object 1253 * 1254 * Returns: 1255 * A virtual pointer to a newly created GEM object or an ERR_PTR-encoded negative 1256 * error code on failure. 1257 */ 1258 void *drm_gem_vmap(struct drm_gem_object *obj) 1259 { 1260 void *vaddr; 1261 1262 if (obj->funcs && obj->funcs->vmap) 1263 vaddr = obj->funcs->vmap(obj); 1264 else if (obj->dev->driver->gem_prime_vmap) 1265 vaddr = obj->dev->driver->gem_prime_vmap(obj); 1266 else 1267 vaddr = ERR_PTR(-EOPNOTSUPP); 1268 1269 if (!vaddr) 1270 vaddr = ERR_PTR(-ENOMEM); 1271 1272 return vaddr; 1273 } 1274 EXPORT_SYMBOL(drm_gem_vmap); 1275 1276 /** 1277 * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space 1278 * @obj: GEM object 1279 * @vaddr: Virtual address (can be NULL) 1280 */ 1281 void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) 1282 { 1283 if (!vaddr) 1284 return; 1285 1286 if (obj->funcs && obj->funcs->vunmap) 1287 obj->funcs->vunmap(obj, vaddr); 1288 else if (obj->dev->driver->gem_prime_vunmap) 1289 obj->dev->driver->gem_prime_vunmap(obj, vaddr); 1290 } 1291 EXPORT_SYMBOL(drm_gem_vunmap); 1292 1293 /** 1294 * drm_gem_lock_reservations - Sets up the ww context and acquires 1295 * the lock on an array of GEM objects. 1296 * 1297 * Once you've locked your reservations, you'll want to set up space 1298 * for your shared fences (if applicable), submit your job, then 1299 * drm_gem_unlock_reservations(). 1300 * 1301 * @objs: drm_gem_objects to lock 1302 * @count: Number of objects in @objs 1303 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as 1304 * part of tracking this set of locked reservations. 1305 */ 1306 int 1307 drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 1308 struct ww_acquire_ctx *acquire_ctx) 1309 { 1310 int contended = -1; 1311 int i, ret; 1312 1313 ww_acquire_init(acquire_ctx, &reservation_ww_class); 1314 1315 retry: 1316 if (contended != -1) { 1317 struct drm_gem_object *obj = objs[contended]; 1318 1319 ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, 1320 acquire_ctx); 1321 if (ret) { 1322 ww_acquire_done(acquire_ctx); 1323 return ret; 1324 } 1325 } 1326 1327 for (i = 0; i < count; i++) { 1328 if (i == contended) 1329 continue; 1330 1331 ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock, 1332 acquire_ctx); 1333 if (ret) { 1334 int j; 1335 1336 for (j = 0; j < i; j++) 1337 ww_mutex_unlock(&objs[j]->resv->lock); 1338 1339 if (contended != -1 && contended >= i) 1340 ww_mutex_unlock(&objs[contended]->resv->lock); 1341 1342 if (ret == -EDEADLK) { 1343 contended = i; 1344 goto retry; 1345 } 1346 1347 ww_acquire_done(acquire_ctx); 1348 return ret; 1349 } 1350 } 1351 1352 ww_acquire_done(acquire_ctx); 1353 1354 return 0; 1355 } 1356 EXPORT_SYMBOL(drm_gem_lock_reservations); 1357 1358 void 1359 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, 1360 struct ww_acquire_ctx *acquire_ctx) 1361 { 1362 int i; 1363 1364 for (i = 0; i < count; i++) 1365 ww_mutex_unlock(&objs[i]->resv->lock); 1366 1367 ww_acquire_fini(acquire_ctx); 1368 } 1369 EXPORT_SYMBOL(drm_gem_unlock_reservations); 1370 1371 /** 1372 * drm_gem_fence_array_add - Adds the fence to an array of fences to be 1373 * waited on, deduplicating fences from the same context. 1374 * 1375 * @fence_array: array of dma_fence * for the job to block on. 1376 * @fence: the dma_fence to add to the list of dependencies. 1377 * 1378 * Returns: 1379 * 0 on success, or an error on failing to expand the array. 1380 */ 1381 int drm_gem_fence_array_add(struct xarray *fence_array, 1382 struct dma_fence *fence) 1383 { 1384 struct dma_fence *entry; 1385 unsigned long index; 1386 u32 id = 0; 1387 int ret; 1388 1389 if (!fence) 1390 return 0; 1391 1392 /* Deduplicate if we already depend on a fence from the same context. 1393 * This lets the size of the array of deps scale with the number of 1394 * engines involved, rather than the number of BOs. 1395 */ 1396 xa_for_each(fence_array, index, entry) { 1397 if (entry->context != fence->context) 1398 continue; 1399 1400 if (dma_fence_is_later(fence, entry)) { 1401 dma_fence_put(entry); 1402 xa_store(fence_array, index, fence, GFP_KERNEL); 1403 } else { 1404 dma_fence_put(fence); 1405 } 1406 return 0; 1407 } 1408 1409 ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL); 1410 if (ret != 0) 1411 dma_fence_put(fence); 1412 1413 return ret; 1414 } 1415 EXPORT_SYMBOL(drm_gem_fence_array_add); 1416 1417 /** 1418 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked 1419 * in the GEM object's reservation object to an array of dma_fences for use in 1420 * scheduling a rendering job. 1421 * 1422 * This should be called after drm_gem_lock_reservations() on your array of 1423 * GEM objects used in the job but before updating the reservations with your 1424 * own fences. 1425 * 1426 * @fence_array: array of dma_fence * for the job to block on. 1427 * @obj: the gem object to add new dependencies from. 1428 * @write: whether the job might write the object (so we need to depend on 1429 * shared fences in the reservation object). 1430 */ 1431 int drm_gem_fence_array_add_implicit(struct xarray *fence_array, 1432 struct drm_gem_object *obj, 1433 bool write) 1434 { 1435 int ret; 1436 struct dma_fence **fences; 1437 unsigned int i, fence_count; 1438 1439 if (!write) { 1440 struct dma_fence *fence = 1441 reservation_object_get_excl_rcu(obj->resv); 1442 1443 return drm_gem_fence_array_add(fence_array, fence); 1444 } 1445 1446 ret = reservation_object_get_fences_rcu(obj->resv, NULL, 1447 &fence_count, &fences); 1448 if (ret || !fence_count) 1449 return ret; 1450 1451 for (i = 0; i < fence_count; i++) { 1452 ret = drm_gem_fence_array_add(fence_array, fences[i]); 1453 if (ret) 1454 break; 1455 } 1456 1457 for (; i < fence_count; i++) 1458 dma_fence_put(fences[i]); 1459 kfree(fences); 1460 return ret; 1461 } 1462 EXPORT_SYMBOL(drm_gem_fence_array_add_implicit); 1463