/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
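
/*
 * A minimal sketch of how a driver typically pairs drm_gem_object_init()
 * with an embedding structure. "struct foo_bo" and foo_bo_create() are
 * hypothetical names for illustration, not part of this file:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, size);
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 *
 * Drivers that manage their own backing storage would call
 * drm_gem_private_object_init() instead of drm_gem_object_init() above.
 */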

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * drop their own reference to the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
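
/*
 * A minimal sketch of drm_gem_handle_create() at the end of a driver's
 * &drm_driver.dumb_create callback, which is the usual call site. The
 * foo_bo_create() helper is hypothetical; the handle owns a reference,
 * so the creation reference is dropped once the handle exists:
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base,
 *					    &args->handle);
 *		drm_gem_object_put(&bo->base);
 *
 *		return ret;
 *	}
 */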

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
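
/*
 * For reference, userspace consumes the fake offset roughly as follows in
 * the standard dumb-buffer flow (sketch; assumes a valid handle and size):
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *
 * The offset carries no meaning other than naming the object to the
 * subsequent mmap(2) call, which ends up in drm_gem_mmap() below.
 */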

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid for objects initialized with
 * drm_gem_object_init(); it cannot be used on objects set up with
 * drm_gem_private_object_init() only, since those have no shmem backing store.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
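
/*
 * A minimal sketch of pairing the two helpers above in a driver's page
 * pin/unpin paths. "struct foo_bo" and its "pages" member are hypothetical:
 *
 *	static int foo_bo_get_pages(struct foo_bo *bo)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&bo->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_bo_put_pages(struct foo_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *		bo->pages = NULL;
 *	}
 */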

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation's
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * 0 on success, -ETIME if the wait timed out, or another negative error code
 * on failure (for instance -ERESTARTSYS if the wait was interrupted, or
 * -EINVAL if the handle lookup failed).
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
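
/*
 * A minimal sketch of a driver wait ioctl built on drm_gem_dma_resv_wait().
 * The drm_foo_wait args structure and its fields are hypothetical:
 *
 *	static int foo_wait_ioctl(struct drm_device *dev, void *data,
 *				  struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout = msecs_to_jiffies(args->timeout_ms);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */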

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs)
		obj->funcs->free(obj);
	else if (dev->driver->gem_free_object_unlocked)
		dev->driver->gem_free_object_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);
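
/*
 * A minimal sketch of the teardown matching the init sketch further above,
 * typically wired up as &drm_gem_object_funcs.free ("struct foo_bo" is
 * hypothetical):
 *
 *	static void foo_bo_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */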

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the VMA
 * size or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	vma->vm_private_data = obj;

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
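
/*
 * Drivers normally reach drm_gem_mmap() below via &file_operations.mmap.
 * For drivers without special file-operation needs this is usually wired up
 * with the DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h> (sketch, "foo"
 * names hypothetical):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		.fops = &foo_fops,
 *	};
 */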

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
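
/*
 * A minimal sketch of a job-submission path combining
 * drm_gem_lock_reservations() and drm_gem_fence_array_add_implicit() as
 * described above. The "deps" xarray, the bos array and the write flag are
 * hypothetical driver state:
 *
 *	struct ww_acquire_ctx ctx;
 *	struct xarray deps;
 *	int i, ret;
 *
 *	xa_init_flags(&deps, XA_FLAGS_ALLOC);
 *
 *	ret = drm_gem_lock_reservations(bos, bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < bo_count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&deps, bos[i], write);
 *		if (ret)
 *			break;
 *	}
 *
 *	... install the job's own fence on each bos[i]->resv, then ...
 *
 *	drm_gem_unlock_reservations(bos, bo_count, &ctx);
 */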