/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
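
/*
 * Example (illustrative only, not part of this file's API): the typical
 * driver-side flow for creating an object and returning a handle to
 * userspace, using the helpers defined below.  The ioctl and its argument
 * struct ("foo_gem_create") are hypothetical stand-ins; only the drm_gem_*
 * calls are real.
 *
 *	static int foo_gem_create_ioctl(struct drm_device *dev, void *data,
 *					struct drm_file *file_priv)
 *	{
 *		struct foo_gem_create *args = data;
 *		struct drm_gem_object *obj;
 *		u32 handle;
 *		int ret;
 *
 *		obj = drm_gem_object_alloc(dev, PAGE_ALIGN(args->size));
 *		if (obj == NULL)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_handle_create(file_priv, obj, &handle);
 *		drm_gem_object_unreference_unlocked(obj);
 *		if (ret)
 *			return ret;
 *
 *		args->handle = handle;
 *		return 0;
 *	}
 *
 * Note that the local reference taken by drm_gem_object_alloc() is dropped
 * once the handle exists; from then on the handle reference keeps the
 * object alive.
 */
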
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return -ENOMEM;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
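
/*
 * Example (illustrative): most drivers embed struct drm_gem_object in a
 * larger, driver-private buffer-object structure and initialize it with one
 * of the helpers above.  "struct foo_bo" and foo_bo_create() are
 * hypothetical names; size must already be page aligned (see the BUG_ON
 * above).
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		... driver-private state ...
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (bo == NULL)
 *			return NULL;
 *
 *		if (drm_gem_object_init(dev, &bo->base, size)) {
 *			kfree(bo);
 *			return NULL;
 *		}
 *		return bo;
 *	}
 *
 * Drivers that manage their own backing storage (carved-out or stolen
 * memory, for instance) would call drm_gem_private_object_init() instead
 * and then provide the pages themselves.
 */
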
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
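
/*
 * Example (illustrative): a driver's "map offset" ioctl typically looks up
 * the object, creates the fake offset on first use, and hands the
 * page-shifted hash key back to userspace for a later mmap(2) call.  The
 * ioctl name and args struct are hypothetical; real drivers differ in the
 * details but generally hold struct_mutex around the offset setup.
 *
 *	static int foo_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
 *					     struct drm_file *file_priv)
 *	{
 *		struct foo_gem_mmap_offset *args = data;
 *		struct drm_gem_object *obj;
 *		int ret = 0;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *		if (obj == NULL) {
 *			mutex_unlock(&dev->struct_mutex);
 *			return -ENOENT;
 *		}
 *
 *		if (!obj->map_list.map) {
 *			ret = drm_gem_create_mmap_offset(obj);
 *			if (ret)
 *				goto out;
 *		}
 *
 *		args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 *	out:
 *		drm_gem_object_unreference(obj);
 *		mutex_unlock(&dev->struct_mutex);
 *		return ret;
 *	}
 */
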
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
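
/*
 * Example (illustrative): the userspace side of the naming ioctls above.
 * One process exports an object it already has a handle to, and another
 * imports it by name; "fd" is each process's own open DRM file descriptor,
 * and share_name()/use_buffer() are placeholders for whatever IPC and
 * bookkeeping the application does.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) == 0)
 *		share_name(flink.name);
 *
 *	struct drm_gem_open op = { .name = received_name };
 *	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &op) == 0)
 *		use_buffer(op.handle, op.size);
 */
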
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
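
/*
 * Example (illustrative): the wiring a driver needs for the mmap path
 * above.  drm_gem_mmap() dispatches faults to the driver through
 * dev->driver->gem_vm_ops, so a driver typically provides something like
 * the following ("foo_gem_fault" is a hypothetical fault handler):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.gem_vm_ops = &foo_gem_vm_ops,
 *		...
 *	};
 *
 * and sets .mmap = drm_gem_mmap in the file_operations it registers for the
 * DRM device node, so that userspace mmap(2) calls using a fake offset
 * obtained via drm_gem_create_mmap_offset() end up here.
 */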