/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}
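
/*
 * A minimal sketch of how a driver might pair the two init helpers above
 * with its own wrapper object. "struct foo_bo" and foo_bo_create() are
 * illustrative names, not part of this file:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev,
 *					    size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		// round up to a full page; drm_gem_object_init() BUG()s
 *		// on unaligned sizes
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 *
 * Drivers that manage their own backing storage (e.g. VRAM) would call
 * drm_gem_private_object_init() instead, which never touches shmem.
 */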

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be called before
 * drm_gem_object_free or we'll be touching freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
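
/*
 * From userspace, a buffer handle is dropped again with the GEM_CLOSE
 * ioctl, which ends up in drm_gem_handle_delete() above. A minimal
 * userspace sketch ("fd" and "handle" are assumed to come from earlier
 * driver-specific calls; drmIoctl() is libdrm's restart-safe ioctl(2)
 * wrapper):
 *
 *	struct drm_gem_close args = {
 *		.handle = handle,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args))
 *		perror("DRM_IOCTL_GEM_CLOSE");
 */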

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use GEM to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}
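
/*
 * Drivers which store their dumb buffers in GEM objects can use the helper
 * above directly as their &drm_driver.dumb_map_offset callback. A sketch
 * of the wiring (the foo_* names are illustrative):
 *
 *	static struct drm_driver foo_driver = {
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		// remaining fields elided
 *	};
 */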

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
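
/*
 * The canonical create-and-publish pattern, e.g. in a dumb_create
 * implementation: create the object, publish it with
 * drm_gem_handle_create() as the very last step, then drop the local
 * reference so the new handle holds the only one. A sketch, reusing the
 * hypothetical foo_bo_create() helper from the earlier example:
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = args->pitch * args->height;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base,
 *					    &args->handle);
 *		// drop the local reference; on success the handle keeps
 *		// the object alive
 *		drm_gem_object_put(&bo->base);
 *
 *		return ret;
 *	}
 */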

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 *	from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(); it does not work on objects initialized only with
 * drm_gem_private_object_init().
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
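
/*
 * drm_gem_get_pages() and drm_gem_put_pages() are meant to be used as a
 * pair, typically from a driver's pin/unpin or fault paths. A sketch (the
 * foo_bo wrapper and its "pages" field are illustrative):
 *
 *	static int foo_bo_pin_pages(struct foo_bo *bo)
 *	{
 *		struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_bo_unpin_pages(struct foo_bo *bo)
 *	{
 *		// mark the pages dirty and accessed so shmem writes them
 *		// back on swap-out and keeps them on the active list
 *		drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *		bo->pages = NULL;
 *	}
 */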

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
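
/*
 * A sketch of a driver wait ioctl built on drm_gem_dma_resv_wait(). The
 * foo_* names and the "struct drm_foo_wait" uapi layout are illustrative;
 * drm_timeout_abs_to_jiffies() is the helper from drm/drm_utils.h for
 * converting an absolute nanosecond timeout:
 *
 *	static int foo_wait_ioctl(struct drm_device *dev, void *data,
 *				  struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait *args = data;
 *		unsigned long timeout =
 *			drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		// wait on all fences, i.e. readers and the writer
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */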

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
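
/*
 * Userspace view of the flink/open pair above: one process publishes a
 * global name for its handle, another turns that name back into a handle
 * of its own. A sketch (userspace code; error handling trimmed):
 *
 *	// exporting process
 *	struct drm_gem_flink flink = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// share flink.name out of band, e.g. over a socket
 *
 *	// importing process
 *	struct drm_gem_open open_args = { .name = name };
 *	drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &open_args);
 *	// open_args.handle and open_args.size are now valid
 *
 * Note that flink names are global and guessable, which is one reason
 * dma-buf file descriptors are the preferred sharing mechanism today.
 */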

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
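
/*
 * drm_gem_vm_open() and drm_gem_vm_close() exist to be plugged into a
 * driver's &vm_operations_struct, so every additional VMA created by
 * fork(), mremap() or a partial unmap holds its own object reference.
 * A sketch (foo_gem_fault is a hypothetical fault handler):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open  = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * The driver's &drm_gem_object_funcs.vm_ops would then point at this table.
 */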

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success, or -EINVAL if the object size is smaller than the
 * VMA size or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
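
/*
 * For the common case, drivers don't call drm_gem_mmap_obj() directly:
 * their &file_operations.mmap points at drm_gem_mmap() below, usually via
 * the DEFINE_DRM_GEM_FOPS() macro, which performs the offset lookup and
 * access checks before delegating here. A sketch:
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		.fops = &foo_fops,
 *		// remaining fields elided
 *	};
 */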

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	int ret;

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (dma_buf_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	if (dma_buf_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	dma_buf_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
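
/*
 * A sketch of the vmap/vunmap pairing for CPU access from the kernel,
 * e.g. around a copy into the buffer. The foo_* names are illustrative;
 * dma_buf_map_memcpy_to() is the copy helper from linux/dma-buf-map.h:
 *
 *	static int foo_copy_into_bo(struct drm_gem_object *obj,
 *				    const void *src, size_t len)
 *	{
 *		struct dma_buf_map map;
 *		int ret;
 *
 *		ret = drm_gem_vmap(obj, &map);
 *		if (ret)
 *			return ret;
 *
 *		// dma_buf_map knows whether the mapping is I/O or system
 *		// memory and copies accordingly
 *		dma_buf_map_memcpy_to(&map, src, len);
 *
 *		drm_gem_vunmap(obj, &map);
 *		return 0;
 *	}
 */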

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);
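
/*
 * The intended bracketing in a job-submission path, with the fence-array
 * helpers used in between (the foo_job fields are illustrative):
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
 *					&acquire_ctx);
 *	if (ret)
 *		return ret;
 *
 *	// collect implicit dependencies, install the job's own fence on
 *	// each reservation object, then:
 *
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
 */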

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_unlocked(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences(obj->resv, NULL,
				  &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
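
/*
 * A sketch of how the two fence-array helpers combine in a scheduler
 * back-end: collect dependencies into an xarray while the reservations are
 * locked, then wait on them before running the job. "foo_job", its "deps"
 * xarray and "writes" flags are illustrative; the xarray must be set up
 * with XA_FLAGS_ALLOC so the xa_alloc() in drm_gem_fence_array_add()
 * works:
 *
 *	struct dma_fence *fence;
 *	unsigned long index;
 *	int i, ret;
 *
 *	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps,
 *						       job->bos[i],
 *						       job->writes[i]);
 *		if (ret)
 *			goto err_free_deps;
 *	}
 *
 *	// later, before running the job, wait on each collected fence
 *	xa_for_each(&job->deps, index, fence)
 *		dma_fence_wait(fence, true);
 */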