/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
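/*
 * For illustration, the handle-as-fd design above looks roughly like this
 * from userspace (a hypothetical sketch, not part of this file; the ioctl
 * numbers and args structs are the ones from <drm/drm.h>):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);	// name the object globally
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &op);	// open it from another fd
 *
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &cl);	// drop the handle again
 */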
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
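/*
 * For illustration, a driver typically embeds struct drm_gem_object in its
 * own buffer-object type and initializes it with one of the helpers above.
 * A hypothetical sketch ("foo_bo" is not a real driver type):
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		// ... driver-private state ...
 *	};
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (!bo)
 *		return ERR_PTR(-ENOMEM);
 *	ret = drm_gem_object_init(dev, &bo->base, roundup(size, PAGE_SIZE));
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 */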
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
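/*
 * For illustration, the usual pattern in a driver's buffer-create ioctl is
 * the following (hypothetical sketch):
 *
 *	obj = drm_gem_object_alloc(dev, size);
 *	if (!obj)
 *		return -ENOMEM;
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	// drop the local reference; the handle now keeps the object alive
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */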
307 */ 308 void 309 drm_gem_free_mmap_offset(struct drm_gem_object *obj) 310 { 311 struct drm_device *dev = obj->dev; 312 struct drm_gem_mm *mm = dev->mm_private; 313 struct drm_map_list *list = &obj->map_list; 314 315 drm_ht_remove_item(&mm->offset_hash, &list->hash); 316 drm_mm_put_block(list->file_offset_node); 317 kfree(list->map); 318 list->map = NULL; 319 } 320 EXPORT_SYMBOL(drm_gem_free_mmap_offset); 321 322 /** 323 * drm_gem_create_mmap_offset - create a fake mmap offset for an object 324 * @obj: obj in question 325 * 326 * GEM memory mapping works by handing back to userspace a fake mmap offset 327 * it can use in a subsequent mmap(2) call. The DRM core code then looks 328 * up the object based on the offset and sets up the various memory mapping 329 * structures. 330 * 331 * This routine allocates and attaches a fake offset for @obj. 332 */ 333 int 334 drm_gem_create_mmap_offset(struct drm_gem_object *obj) 335 { 336 struct drm_device *dev = obj->dev; 337 struct drm_gem_mm *mm = dev->mm_private; 338 struct drm_map_list *list; 339 struct drm_local_map *map; 340 int ret; 341 342 /* Set the object up for mmap'ing */ 343 list = &obj->map_list; 344 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 345 if (!list->map) 346 return -ENOMEM; 347 348 map = list->map; 349 map->type = _DRM_GEM; 350 map->size = obj->size; 351 map->handle = obj; 352 353 /* Get a DRM GEM mmap offset allocated... */ 354 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 355 obj->size / PAGE_SIZE, 0, false); 356 357 if (!list->file_offset_node) { 358 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 359 ret = -ENOSPC; 360 goto out_free_list; 361 } 362 363 list->file_offset_node = drm_mm_get_block(list->file_offset_node, 364 obj->size / PAGE_SIZE, 0); 365 if (!list->file_offset_node) { 366 ret = -ENOMEM; 367 goto out_free_list; 368 } 369 370 list->hash.key = list->file_offset_node->start; 371 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); 372 if (ret) { 373 DRM_ERROR("failed to add to map hash\n"); 374 goto out_free_mm; 375 } 376 377 return 0; 378 379 out_free_mm: 380 drm_mm_put_block(list->file_offset_node); 381 out_free_list: 382 kfree(list->map); 383 list->map = NULL; 384 385 return ret; 386 } 387 EXPORT_SYMBOL(drm_gem_create_mmap_offset); 388 389 /** Returns a reference to the object named by the handle. */ 390 struct drm_gem_object * 391 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, 392 u32 handle) 393 { 394 struct drm_gem_object *obj; 395 396 spin_lock(&filp->table_lock); 397 398 /* Check if we currently have a reference on the object */ 399 obj = idr_find(&filp->object_idr, handle); 400 if (obj == NULL) { 401 spin_unlock(&filp->table_lock); 402 return NULL; 403 } 404 405 drm_gem_object_reference(obj); 406 407 spin_unlock(&filp->table_lock); 408 409 return obj; 410 } 411 EXPORT_SYMBOL(drm_gem_object_lookup); 412 413 /** 414 * Releases the handle to an mm object. 415 */ 416 int 417 drm_gem_close_ioctl(struct drm_device *dev, void *data, 418 struct drm_file *file_priv) 419 { 420 struct drm_gem_close *args = data; 421 int ret; 422 423 if (!(dev->driver->driver_features & DRIVER_GEM)) 424 return -ENODEV; 425 426 ret = drm_gem_handle_delete(file_priv, args->handle); 427 428 return ret; 429 } 430 431 /** 432 * Create a global name for an object, returning the name. 433 * 434 * Note that the name does not hold a reference; when the object 435 * is freed, the name goes away. 
436 */ 437 int 438 drm_gem_flink_ioctl(struct drm_device *dev, void *data, 439 struct drm_file *file_priv) 440 { 441 struct drm_gem_flink *args = data; 442 struct drm_gem_object *obj; 443 int ret; 444 445 if (!(dev->driver->driver_features & DRIVER_GEM)) 446 return -ENODEV; 447 448 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 449 if (obj == NULL) 450 return -ENOENT; 451 452 idr_preload(GFP_KERNEL); 453 spin_lock(&dev->object_name_lock); 454 if (!obj->name) { 455 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); 456 obj->name = ret; 457 args->name = (uint64_t) obj->name; 458 spin_unlock(&dev->object_name_lock); 459 idr_preload_end(); 460 461 if (ret < 0) 462 goto err; 463 ret = 0; 464 465 /* Allocate a reference for the name table. */ 466 drm_gem_object_reference(obj); 467 } else { 468 args->name = (uint64_t) obj->name; 469 spin_unlock(&dev->object_name_lock); 470 idr_preload_end(); 471 ret = 0; 472 } 473 474 err: 475 drm_gem_object_unreference_unlocked(obj); 476 return ret; 477 } 478 479 /** 480 * Open an object using the global name, returning a handle and the size. 481 * 482 * This handle (of course) holds a reference to the object, so the object 483 * will not go away until the handle is deleted. 484 */ 485 int 486 drm_gem_open_ioctl(struct drm_device *dev, void *data, 487 struct drm_file *file_priv) 488 { 489 struct drm_gem_open *args = data; 490 struct drm_gem_object *obj; 491 int ret; 492 u32 handle; 493 494 if (!(dev->driver->driver_features & DRIVER_GEM)) 495 return -ENODEV; 496 497 spin_lock(&dev->object_name_lock); 498 obj = idr_find(&dev->object_name_idr, (int) args->name); 499 if (obj) 500 drm_gem_object_reference(obj); 501 spin_unlock(&dev->object_name_lock); 502 if (!obj) 503 return -ENOENT; 504 505 ret = drm_gem_handle_create(file_priv, obj, &handle); 506 drm_gem_object_unreference_unlocked(obj); 507 if (ret) 508 return ret; 509 510 args->handle = handle; 511 args->size = obj->size; 512 513 return 0; 514 } 515 516 /** 517 * Called at device open time, sets up the structure for handling refcounting 518 * of mm objects. 519 */ 520 void 521 drm_gem_open(struct drm_device *dev, struct drm_file *file_private) 522 { 523 idr_init(&file_private->object_idr); 524 spin_lock_init(&file_private->table_lock); 525 } 526 527 /** 528 * Called at device close to release the file's 529 * handle references on objects. 530 */ 531 static int 532 drm_gem_object_release_handle(int id, void *ptr, void *data) 533 { 534 struct drm_file *file_priv = data; 535 struct drm_gem_object *obj = ptr; 536 struct drm_device *dev = obj->dev; 537 538 drm_gem_remove_prime_handles(obj, file_priv); 539 540 if (dev->driver->gem_close_object) 541 dev->driver->gem_close_object(obj, file_priv); 542 543 drm_gem_object_handle_unreference_unlocked(obj); 544 545 return 0; 546 } 547 548 /** 549 * Called at close time when the filp is going away. 550 * 551 * Releases any remaining references on objects by this filp. 552 */ 553 void 554 drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 555 { 556 idr_for_each(&file_private->object_idr, 557 &drm_gem_object_release_handle, file_private); 558 idr_destroy(&file_private->object_idr); 559 } 560 561 void 562 drm_gem_object_release(struct drm_gem_object *obj) 563 { 564 if (obj->filp) 565 fput(obj->filp); 566 } 567 EXPORT_SYMBOL(drm_gem_object_release); 568 569 /** 570 * Called after the last reference to the object has been lost. 
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
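/*
 * For illustration, a driver opts into this mmap path by routing mmap on the
 * DRM fd to drm_gem_mmap() below and supplying fault handling via gem_vm_ops
 * (hypothetical sketch; foo_gem_fault is a driver-supplied fault handler):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations foo_fops = {
 *		.owner = THIS_MODULE,
 *		.mmap = drm_gem_mmap,
 *		// ... other fops ...
 *	};
 *
 *	static struct drm_driver foo_driver = {
 *		.gem_vm_ops = &foo_gem_vm_ops,
 *		.fops = &foo_fops,
 *		// ...
 *	};
 */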

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
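/*
 * For illustration, the full mapping flow seen from userspace, using the
 * fake offset obtained from a driver map ioctl (hypothetical sketch):
 *
 *	// "offset" came back from the driver's map ioctl (see
 *	// drm_gem_create_mmap_offset() above)
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 *
 * The first access then faults into the driver's gem_vm_ops->fault handler,
 * which is where migration, GTT binding, and similar work actually happens.
 */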