/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
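/*
 * Worked example (illustrative, assuming a 64-bit kernel with 4KiB pages,
 * i.e. PAGE_SHIFT == 12): 0xFFFFFFFFUL >> 12 is 0xFFFFF pages, so the fake
 * offsets start at page 0x100000 -- byte offset 4GiB, one page past the
 * highest pgoff a 32-bit byte offset can reach -- and the offset manager
 * hands out 16 * 0xFFFFF pages (roughly 64GiB of offset space) from there.
 */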
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
				struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
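/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * drivers typically embed struct drm_gem_object in a driver-private object
 * and set up the shmfs backing store through drm_gem_object_init(), e.g.:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *		// driver-private state follows
 *	};
 *
 *	struct foo_gem_object *fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *	if (fobj == NULL)
 *		return NULL;
 *	// size must already be page-aligned, see the BUG_ON above
 *	if (drm_gem_object_init(dev, &fobj->base, size) != 0) {
 *		kfree(fobj);
 *		return NULL;
 *	}
 */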
/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their own reference to the object
 * afterwards, since the handle now keeps it alive.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
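/*
 * Illustrative sketch (hypothetical driver ioctl, not part of this file):
 * a typical driver "create" ioctl allocates an object, publishes a handle,
 * and then drops its own reference, leaving the handle as the sole owner:
 *
 *	struct drm_gem_object *obj;
 *	u32 handle;
 *	int ret;
 *
 *	obj = drm_gem_object_alloc(dev, roundup(args->size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	// drop the allocation reference; on success the handle keeps obj alive
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */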
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
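/*
 * Illustrative flow (userspace-side sketch; fd and name plumbing are
 * hypothetical): flink/open is how two clients share one object.  Client A
 * turns its handle into a global name, passes the name out of band, and
 * client B turns the name back into a handle of its own:
 *
 *	// client A
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	// send flink.name to client B
 *
 *	// client B
 *	struct drm_gem_open op = { .name = name_from_a };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
 *	// op.handle and op.size now describe the shared object
 */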
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
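/*
 * Illustrative sketch (hypothetical driver code): drivers hook these
 * helpers into their vm_operations_struct next to their own fault
 * handler, and point dev->driver->gem_vm_ops at it so that drm_gem_mmap()
 * below installs it on the vma:
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */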
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
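/*
 * Illustrative flow (userspace-side sketch; the ioctl producing the fake
 * offset is driver-specific and hypothetical here): the fake offset is
 * simply passed to mmap() on the DRM fd, which routes the call to
 * drm_gem_mmap() above:
 *
 *	// fake_offset obtained from a driver map-offset ioctl
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, (off_t)fake_offset);
 */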