/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}
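
/*
 * Example (illustrative only, not part of the core code): a driver typically
 * embeds struct drm_gem_object in its own buffer-object structure and calls
 * drm_gem_object_init() to attach the shmem backing store.  The "foo" names
 * below are hypothetical.
 *
 *      struct foo_gem_object {
 *              struct drm_gem_object base;
 *      };
 *
 *      static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
 *                                                   size_t size)
 *      {
 *              struct foo_gem_object *bo;
 *              int ret;
 *
 *              size = PAGE_ALIGN(size);
 *
 *              bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *
 *              ret = drm_gem_object_init(dev, &bo->base, size);
 *              if (ret) {
 *                      kfree(bo);
 *                      return ERR_PTR(ret);
 *              }
 *
 *              return bo;
 *      }
 */
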
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->funcs && obj->funcs->close)
                obj->funcs->close(obj, file_priv);
        else if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations.
         */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs && obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        } else if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
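
/*
 * Example (illustrative only, not part of the core code): a typical
 * &drm_driver.dumb_create implementation allocates the object, publishes it
 * to userspace with drm_gem_handle_create() as the very last step and then
 * drops its local reference, leaving the handle as the only reference held
 * on behalf of userspace.  foo_gem_create() is the hypothetical allocator
 * sketched after drm_gem_object_init() above.
 *
 *      static int foo_dumb_create(struct drm_file *file_priv,
 *                                 struct drm_device *dev,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              struct foo_gem_object *bo;
 *              int ret;
 *
 *              args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *              args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *              bo = foo_gem_create(dev, args->size);
 *              if (IS_ERR(bo))
 *                      return PTR_ERR(bo);
 *
 *              ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *              drm_gem_object_put_unlocked(&bo->base);
 *
 *              return ret;
 *      }
 */
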
/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj)
                drm_gem_object_get(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}
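
/*
 * Example (illustrative only, not part of the core code): driver ioctls
 * usually resolve a userspace handle with drm_gem_object_lookup() and drop
 * the acquired reference again once they are done with the object.  The
 * ioctl, its argument structure and the to_foo_gem_object() helper are
 * hypothetical.
 *
 *      static int foo_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 *                                          struct drm_file *file_priv)
 *      {
 *              struct drm_foo_gem_set_tiling *args = data;
 *              struct drm_gem_object *obj;
 *
 *              obj = drm_gem_object_lookup(file_priv, args->handle);
 *              if (!obj)
 *                      return -ENOENT;
 *
 *              to_foo_gem_object(obj)->tiling_mode = args->tiling_mode;
 *
 *              drm_gem_object_put_unlocked(obj);
 *              return 0;
 *      }
 */
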
/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (obj->funcs) {
                obj->funcs->free(obj);
        } else if (dev->driver->gem_free_object_unlocked) {
                dev->driver->gem_free_object_unlocked(obj);
        } else if (dev->driver->gem_free_object) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));

                dev->driver->gem_free_object(obj);
        }
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev;

        if (!obj)
                return;

        dev = obj->dev;

        if (dev->driver->gem_free_object) {
                might_lock(&dev->struct_mutex);
                if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                   &dev->struct_mutex))
                        mutex_unlock(&dev->struct_mutex);
        } else {
                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);
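
/*
 * Example (illustrative only, not part of the core code): drivers that rely
 * on a fault handler typically wire drm_gem_vm_open() and drm_gem_vm_close()
 * into their &vm_operations_struct so that every additional vma created by
 * mremap() or a partial unmap holds its own object reference.  foo_gem_fault
 * is a hypothetical driver fault handler.
 *
 *      static const struct vm_operations_struct foo_gem_vm_ops = {
 *              .fault = foo_gem_fault,
 *              .open = drm_gem_vm_open,
 *              .close = drm_gem_vm_close,
 *      };
 */
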
/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (obj->funcs && obj->funcs->vm_ops)
                vma->vm_ops = obj->funcs->vm_ops;
        else if (dev->driver->gem_vm_ops)
                vma->vm_ops = dev->driver->gem_vm_ops;
        else
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
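
/*
 * Example (illustrative only, not part of the core code): most GEM drivers
 * simply plug drm_gem_mmap() into their &file_operations and let it look up
 * the object and dispatch to the gem_vm_ops set up above; the remaining
 * entries are the usual DRM file operation helpers.
 *
 *      static const struct file_operations foo_driver_fops = {
 *              .owner = THIS_MODULE,
 *              .open = drm_open,
 *              .release = drm_release,
 *              .unlocked_ioctl = drm_ioctl,
 *              .compat_ioctl = drm_compat_ioctl,
 *              .poll = drm_poll,
 *              .read = drm_read,
 *              .llseek = noop_llseek,
 *              .mmap = drm_gem_mmap,
 *      };
 */
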
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE) {
                        drm_gem_object_put_unlocked(obj);
                        return -EINVAL;
                }

                vma->vm_flags &= ~VM_MAYWRITE;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");

        if (obj->funcs && obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
        else if (obj->dev->driver->gem_print_info)
                obj->dev->driver->gem_print_info(p, indent, obj);
}

/**
 * drm_gem_pin - Pin backing buffer in memory
 * @obj: GEM object
 *
 * Make sure the backing buffer is pinned in memory.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_pin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->pin)
                return obj->funcs->pin(obj);
        else if (obj->dev->driver->gem_prime_pin)
                return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
}
EXPORT_SYMBOL(drm_gem_pin);

/**
 * drm_gem_unpin - Unpin backing buffer from memory
 * @obj: GEM object
 *
 * Relax the requirement that the backing buffer is pinned in memory.
 */
void drm_gem_unpin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->unpin)
                obj->funcs->unpin(obj);
        else if (obj->dev->driver->gem_prime_unpin)
                obj->dev->driver->gem_prime_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_unpin);

/**
 * drm_gem_vmap - Map buffer into kernel virtual address space
 * @obj: GEM object
 *
 * Returns:
 * A kernel virtual address for the backing buffer of the GEM object, or an
 * ERR_PTR-encoded negative error code on failure.
 */
void *drm_gem_vmap(struct drm_gem_object *obj)
{
        void *vaddr;

        if (obj->funcs && obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
        else if (obj->dev->driver->gem_prime_vmap)
                vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);

        if (!vaddr)
                vaddr = ERR_PTR(-ENOMEM);

        return vaddr;
}
EXPORT_SYMBOL(drm_gem_vmap);

/**
 * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
 * @obj: GEM object
 * @vaddr: Virtual address (can be NULL)
 */
void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        if (!vaddr)
                return;

        if (obj->funcs && obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
        else if (obj->dev->driver->gem_prime_vunmap)
                obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_vunmap);
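
/*
 * Example (illustrative only, not part of the core code): a short-lived
 * kernel mapping around a CPU copy using the drm_gem_vmap()/drm_gem_vunmap()
 * helpers above.  The function name and calling context are hypothetical.
 *
 *      static int foo_copy_to_bo(struct drm_gem_object *obj,
 *                                const void *src, size_t len)
 *      {
 *              void *vaddr;
 *
 *              vaddr = drm_gem_vmap(obj);
 *              if (IS_ERR(vaddr))
 *                      return PTR_ERR(vaddr);
 *
 *              memcpy(vaddr, src, len);
 *              drm_gem_vunmap(obj, vaddr);
 *
 *              return 0;
 *      }
 */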