/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
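 *
 * For example, a modeset driver with render-node support ends up with two
 * minor slots. This is a sketch of the mapping implemented by
 * drm_minor_get_slot() below; the /dev/dri node names correspond to the
 * char-devs created for each minor:
 *
 *	dev->primary  ->  DRM_MINOR_PRIMARY  ->  /dev/dri/cardN
 *	dev->render   ->  DRM_MINOR_RENDER   ->  /dev/dri/renderDN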
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	default:
		BUG();
	}
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased, so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
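 *
 * A typical caller therefore pairs the two calls, as drm_stub_open() below
 * does (sketch):
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	// ... use minor and minor->dev ...
 *	drm_minor_release(minor);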
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device.
 * This is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like
 * memory management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical struct &device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *	struct driver_device {
 *		struct drm_device drm;
 *		void *userspace_facing;
 *		struct clk *pclk;
 *	};
 *
 *	static struct drm_driver driver_drm_driver = {
 *		[...]
 *	};
 *
 *	static int driver_probe(struct platform_device *pdev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = drmm_mode_config_init(drm);
 *		if (ret)
 *			return ret;
 *
 *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *		if (!priv->userspace_facing)
 *			return -ENOMEM;
 *
 *		priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *		if (IS_ERR(priv->pclk))
 *			return PTR_ERR(priv->pclk);
 *
 *		// Further setup, display pipeline etc
 *
 *		platform_set_drvdata(pdev, drm);
 *
 *		drm_mode_config_reset(drm);
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_generic_setup(drm, 32);
 *
 *		return 0;
 *	}
 *
 *	// This function is called before the devm_ resources are released
 *	static int driver_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 *
 *	// This function is called on kernel restart and shutdown
 *	static void driver_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 *	static int __maybe_unused driver_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused driver_pm_resume(struct device *dev)
 *	{
 *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops driver_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *	};
 *
 *	static struct platform_driver driver_driver = {
 *		.driver = {
 *			[...]
 *			.pm = &driver_pm_ops,
 *		},
 *		.probe = driver_probe,
 *		.remove = driver_remove,
 *		.shutdown = driver_shutdown,
 *	};
 *	module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload)
 * should use drm_dev_unplug() instead of drm_dev_unregister(). The driver must
 * protect regions that access device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is
 * one shortcoming however: drm_dev_unplug() marks the drm_device as unplugged
 * before drm_atomic_helper_shutdown() is called. This means that if the
 * disable code paths are protected, they will not run on regular driver module
 * unload, possibly leaving the hardware enabled.
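 *
 * A minimal sketch of such a protected region, reusing struct driver_device
 * from the example above (driver_hw_disable() is a hypothetical helper that
 * touches registers which disappear on unplug):
 *
 * .. code-block:: c
 *
 *	static void driver_hw_disable(struct driver_device *priv)
 *	{
 *		int idx;
 *
 *		if (!drm_dev_enter(&priv->drm, &idx))
 *			return;		// device is gone, skip register access
 *
 *		// ... poke hardware registers here ...
 *
 *		drm_dev_exit(idx);
 *	}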
 */

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not
 * allow stand-alone address_space objects, so we need an underlying inode. As
 * there is no way to allocate an independent inode easily, we need a fake
 * internal VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references
 * to the inode. But each drm_fs_inode_new() call must be paired with exactly
 * one drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name = "drm",
	.owner = THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component helper
 * library<component>`. For consistency and better options for code reuse the
 * following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register() (see the
 *   sketch after this list).
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
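 *
 * A minimal sketch of such a bind callback, following the guidelines above
 * (my_master_bind(), my_drm_driver and struct my_driver_device are
 * hypothetical driver names, not part of any helper library):
 *
 * .. code-block:: c
 *
 *	static int my_master_bind(struct device *dev)
 *	{
 *		struct my_driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(dev, &my_drm_driver,
 *					  struct my_driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		// Pass the &drm_device as the opaque pointer to all components
 *		ret = component_bind_all(dev, drm);
 *		if (ret)
 *			return ret;
 *
 *		// Further setup using the bound components, then publish
 *		return drm_dev_register(drm, 0);
 *	}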
 */

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}

static int drm_dev_init(struct drm_device *dev,
			struct drm_driver *driver,
			struct device *parent)
{
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}

static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
	if (ret)
		devm_drm_dev_init_release(dev);

	return ret;
}

void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
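/*
 * Drivers do not call __devm_drm_dev_alloc() directly; the devm_drm_dev_alloc()
 * macro in drm_drv.h wraps it roughly like this (a sketch, shown here for
 * context only):
 *
 *	#define devm_drm_dev_alloc(parent, driver, type, member) \
 *		((type *)__devm_drm_dev_alloc(parent, driver, sizeof(type), \
 *					      offsetof(type, member)))
 *
 * so @size is the size of the driver's container structure and @offset is the
 * offset of its embedded &struct drm_device member.
 */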
/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(), which does not
 * support subclassing through embedding the struct &drm_device in a driver
 * private structure, and which does not support automatic cleanup through
 * devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the
	 * controlD* sysfs files to figure out whether it's a modeset driver.
	 * It only does readdir, hence a symlink is sufficient (and the least
	 * confusing option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardevs have been allocated in the range 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation.
 * @dev must be initialized via drm_dev_init() previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	struct drm_driver *driver = dev->driver;
	int ret;

	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference.
 *
 * A special form of unregistering for hotpluggable devices is
 * drm_dev_unplug(), which can be called while there are still open users of
 * @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_legacy_rmmaps(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @name: unique name
 *
 * Sets the unique name of a DRM device using the specified string.
 * This is already done by drm_dev_init(); drivers should only override the
 * default unique name for backwards compatibility reasons.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
	drmm_kfree(dev, dev->unique);
	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from
 * DRM core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);