/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
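 *
 * Minor numbers are handed out in fixed, 64-wide ranges per minor type (see
 * the idr_alloc() call in drm_minor_alloc() below). With the current type
 * values this works out to, as an illustrative sketch (device node names are
 * produced by drm_sysfs_minor_alloc() and udev, shown here only as the
 * conventional result):
 *
 *	DRM_MINOR_PRIMARY: minors   0..63,  the /dev/dri/card<n> nodes
 *	DRM_MINOR_RENDER:  minors 128..191, the /dev/dri/renderD<n> nodes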
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	default:
		BUG();
	}
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
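 *
 * A minimal usage sketch, mirroring what drm_stub_open() below does (error
 * handling trimmed for brevity):
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *
 *	... dereference minor->dev ...
 *
 *	drm_minor_release(minor);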
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device.
 * This is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like
 * memory management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical struct &device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *	struct driver_device {
 *		struct drm_device drm;
 *		void *userspace_facing;
 *		struct clk *pclk;
 *	};
 *
 *	static const struct drm_driver driver_drm_driver = {
 *		[...]
 *	};
 *
 *	static int driver_probe(struct platform_device *pdev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = drmm_mode_config_init(drm);
 *		if (ret)
 *			return ret;
 *
 *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *		if (!priv->userspace_facing)
 *			return -ENOMEM;
 *
 *		priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *		if (IS_ERR(priv->pclk))
 *			return PTR_ERR(priv->pclk);
 *
 *		// Further setup, display pipeline etc
 *
 *		platform_set_drvdata(pdev, drm);
 *
 *		drm_mode_config_reset(drm);
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_generic_setup(drm, 32);
 *
 *		return 0;
 *	}
 *
 *	// This function is called before the devm_ resources are released
 *	static int driver_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 *
 *	// This function is called on kernel restart and shutdown
 *	static void driver_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 *	static int __maybe_unused driver_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused driver_pm_resume(struct device *dev)
 *	{
 *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops driver_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *	};
 *
 *	static struct platform_driver driver_driver = {
 *		.driver = {
 *			[...]
 *			.pm = &driver_pm_ops,
 *		},
 *		.probe = driver_probe,
 *		.remove = driver_remove,
 *		.shutdown = driver_shutdown,
 *	};
 *	module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload)
 * should use drm_dev_unplug() instead of drm_dev_unregister(). The driver must
 * protect regions that access device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is
 * one shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged
 * before drm_atomic_helper_shutdown() is called. This means that if the
 * disable code paths are protected, they will not run on regular driver module
 * unload, possibly leaving the hardware enabled.
 */

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
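 *
 * A minimal sketch of the preferred, explicit teardown (this is exactly what
 * the function body below boils down to):
 *
 *	drm_dev_unregister(dev);
 *	drm_dev_put(dev);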
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not
 * allow stand-alone address_space objects, so we need an underlying inode. As
 * there is no way to allocate an independent inode easily, we need a fake
 * internal VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references
 * to the inode. But each drm_fs_inode_new() call must be paired with exactly
 * one drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */
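
/*
 * Illustrative sketch of how the per-device anon inode is typically consumed
 * (the actual wiring lives in drm_file.c and the GEM/TTM mmap code, not here):
 * the inode's address_space is shared by all open files of a device, so that
 * all userspace mappings can be found and revoked in one go, e.g.
 *
 *	filp->f_mapping = dev->anon_inode->i_mapping;
 */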

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name = "drm",
	.owner = THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component helper
 * library<component>`. For consistency, and for better options for code reuse,
 * the following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register(), as sketched
 *   at the end of this section.
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
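 *
 * A condensed &component_master_ops.master_bind sketch following these
 * recommendations (hypothetical driver, component matching and error
 * unwinding omitted):
 *
 * .. code-block:: c
 *
 *	static int driver_master_bind(struct device *dev)
 *	{
 *		struct driver_device *priv;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *
 *		ret = component_bind_all(dev, &priv->drm);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dev_register(&priv->drm, 0);
 *	}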
 */

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped.
	 */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}

static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
#ifdef CONFIG_DRM_LEGACY
	dev->driver = (struct drm_driver *)driver;
#else
	dev->driver = driver;
#endif

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}

static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     const struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
	if (ret)
		devm_drm_dev_init_release(dev);

	return ret;
}

void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
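
/*
 * Note: drivers do not call __devm_drm_dev_alloc() directly. The
 * devm_drm_dev_alloc() macro in include/drm/drm_drv.h wraps it, along the
 * lines of the sketch below, so that the containing structure type and the
 * offset of its embedded &struct drm_device are filled in automatically:
 *
 *	#define devm_drm_dev_alloc(parent, driver, type, member) \
 *		((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
 *					       offsetof(type, member)))
 */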

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(). It does not support
 * subclassing through embedding &struct drm_device in a driver-private
 * structure, and it does not support automatic cleanup through devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the
	 * controlD* sysfs files to figure out whether it's a modeset driver.
	 * It only does readdir, hence a symlink is sufficient (and the least
	 * confusing option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardevs have been allocated in the range
	 * 64-127.
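	 *
	 * As a concrete example of the arithmetic below: the primary minor
	 * with index 0 (card0) gets a "controlD64" symlink created next to
	 * its device directory in sysfs, pointing back at it.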
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be initialized via
 * drm_dev_init() previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference.
 *
 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
 * which can be called while there are still open users of @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
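 *
 * A typical remove path therefore looks roughly like the driver example in
 * the "driver instance overview" DOC comment above:
 *
 *	drm_dev_unregister(drm);
 *	drm_atomic_helper_shutdown(drm);
 *	...
 *	drm_dev_put(drm);	// or implicit via devm_drm_dev_alloc()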
 */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_legacy_rmmaps(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @name: unique name
 *
 * Sets the unique name of a DRM device using the specified string. This is
 * already done by drm_dev_init(); drivers should only override the default
 * unique name for backwards compatibility reasons.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
	drmm_kfree(dev, dev->unique);
	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);