/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
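
/*
 * Illustration only (not part of the uAPI contract): with the 64-wide
 * per-type index windows handed out by drm_minor_alloc() below, and
 * assuming the usual enum values DRM_MINOR_PRIMARY == 0 and
 * DRM_MINOR_RENDER == 2, the resulting minor numbers are:
 *
 *      DRM_MINOR_PRIMARY: [64 * 0, 64 * 1) =   0..63   (/dev/dri/cardN)
 *      DRM_MINOR_RENDER:  [64 * 2, 64 * 3) = 128..191  (/dev/dri/renderDN)
 */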

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             unsigned int type)
{
        switch (type) {
        case DRM_MINOR_PRIMARY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
        default:
                BUG();
        }
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
        struct drm_minor *minor = data;
        unsigned long flags;

        WARN_ON(dev != minor->dev);

        put_device(minor->kdev);

        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;
        int r;

        minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;

        minor->type = type;
        minor->dev = dev;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&drm_minor_lock, flags);
        r = idr_alloc(&drm_minors_idr,
                      NULL,
                      64 * type,
                      64 * (type + 1),
                      GFP_NOWAIT);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        idr_preload_end();

        if (r < 0)
                return r;

        minor->index = r;

        r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
        if (r)
                return r;

        minor->kdev = drm_sysfs_minor_alloc(minor);
        if (IS_ERR(minor->kdev))
                return PTR_ERR(minor->kdev);

        *drm_minor_get_slot(dev, type) = minor;
        return 0;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;
        int ret;

        DRM_DEBUG("\n");

        minor = *drm_minor_get_slot(dev, type);
        if (!minor)
                return 0;

        ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                goto err_debugfs;
        }

        ret = device_add(minor->kdev);
        if (ret)
                goto err_debugfs;

        /* replace NULL with @minor so lookups will succeed from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, minor, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        DRM_DEBUG("new minor registered %d\n", minor->index);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(minor);
        return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !device_is_registered(minor->kdev))
                return;

        /* replace @minor with NULL so lookups will fail from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, NULL, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        device_del(minor->kdev);
        dev_set_drvdata(minor->kdev, NULL); /* safety belt */
        drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;
        unsigned long flags;

        spin_lock_irqsave(&drm_minor_lock, flags);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (minor)
                drm_dev_get(minor->dev);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_dev_is_unplugged(minor->dev)) {
                drm_dev_put(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
        drm_dev_put(minor->dev);
}
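
/*
 * A minimal sketch of the acquire/release pairing described above
 * (illustrative only; see drm_stub_open() at the bottom of this file for
 * an in-tree user):
 *
 *      struct drm_minor *minor = drm_minor_acquire(minor_id);
 *
 *      if (IS_ERR(minor))
 *              return PTR_ERR(minor);
 *
 *      ... use minor and minor->dev ...
 *
 *      drm_minor_release(minor);
 */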

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device.
 * This is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like
 * memory management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical &struct device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *      struct driver_device {
 *              struct drm_device drm;
 *              void *userspace_facing;
 *              struct clk *pclk;
 *      };
 *
 *      static const struct drm_driver driver_drm_driver = {
 *              [...]
 *      };
 *
 *      static int driver_probe(struct platform_device *pdev)
 *      {
 *              struct driver_device *priv;
 *              struct drm_device *drm;
 *              int ret;
 *
 *              priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *                                        struct driver_device, drm);
 *              if (IS_ERR(priv))
 *                      return PTR_ERR(priv);
 *              drm = &priv->drm;
 *
 *              ret = drmm_mode_config_init(drm);
 *              if (ret)
 *                      return ret;
 *
 *              priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *              if (!priv->userspace_facing)
 *                      return -ENOMEM;
 *
 *              priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *              if (IS_ERR(priv->pclk))
 *                      return PTR_ERR(priv->pclk);
 *
 *              // Further setup, display pipeline etc
 *
 *              platform_set_drvdata(pdev, drm);
 *
 *              drm_mode_config_reset(drm);
 *
 *              ret = drm_dev_register(drm, 0);
 *              if (ret)
 *                      return ret;
 *
 *              drm_fbdev_generic_setup(drm, 32);
 *
 *              return 0;
 *      }
 *
 *      // This function is called before the devm_ resources are released
 *      static int driver_remove(struct platform_device *pdev)
 *      {
 *              struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *              drm_dev_unregister(drm);
 *              drm_atomic_helper_shutdown(drm);
 *
 *              return 0;
 *      }
 *
 *      // This function is called on kernel restart and shutdown
 *      static void driver_shutdown(struct platform_device *pdev)
 *      {
 *              drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *      }
 *
 *      static int __maybe_unused driver_pm_suspend(struct device *dev)
 *      {
 *              return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *      }
 *
 *      static int __maybe_unused driver_pm_resume(struct device *dev)
 *      {
 *              drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *              return 0;
 *      }
 *
 *      static const struct dev_pm_ops driver_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *      };
 *
 *      static struct platform_driver driver_driver = {
 *              .driver = {
 *                      [...]
 *                      .pm = &driver_pm_ops,
 *              },
 *              .probe = driver_probe,
 *              .remove = driver_remove,
 *              .shutdown = driver_shutdown,
 *      };
 *      module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload)
 * should use drm_dev_unplug() instead of drm_dev_unregister(). The driver must
 * protect regions that are accessing device resources to prevent use after
 * they're released. This is done using drm_dev_enter() and drm_dev_exit().
 * There is one shortcoming, however: drm_dev_unplug() marks the drm_device as
 * unplugged before drm_atomic_helper_shutdown() is called. This means that if
 * the disable code paths are protected, they will not run on regular driver
 * module unload, possibly leaving the hardware enabled.
 */
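
/*
 * A minimal sketch, reusing the hypothetical platform driver from the
 * example above, of a remove callback for a hot-unpluggable device as
 * described in the last paragraph: drm_dev_unplug() replaces
 * drm_dev_unregister(), everything else stays the same.
 *
 *      static int driver_remove(struct platform_device *pdev)
 *      {
 *              struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *              drm_dev_unplug(drm);
 *              drm_atomic_helper_shutdown(drm);
 *
 *              return 0;
 *      }
 */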

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_dev_unregister(dev);
        drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
        *idx = srcu_read_lock(&drm_unplug_srcu);

        if (dev->unplugged) {
                srcu_read_unlock(&drm_unplug_srcu, *idx);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
        srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
        /*
         * After synchronizing any critical read section is guaranteed to see
         * the new value of ->unplugged, and any critical section which might
         * still have seen the old value of ->unplugged is guaranteed to have
         * finished.
         */
        dev->unplugged = true;
        synchronize_srcu(&drm_unplug_srcu);

        drm_dev_unregister(dev);

        /* Clear all CPU mappings pointing to this device */
        unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
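
/*
 * A minimal sketch, assuming a hypothetical hardware-touching helper, of
 * the drm_dev_enter()/drm_dev_exit() protection referred to above:
 *
 *      int driver_poke_hw(struct drm_device *drm)
 *      {
 *              int idx, ret;
 *
 *              if (!drm_dev_enter(drm, &idx))
 *                      return -ENODEV; // device was unplugged
 *
 *              ret = ...;              // access device resources
 *
 *              drm_dev_exit(idx);
 *              return ret;
 *      }
 */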

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not
 * allow stand-alone address_space objects, so we need an underlying inode. As
 * there is no way to allocate an independent inode easily, we need a fake
 * internal VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references
 * to the inode. But each drm_fs_inode_new() call must be paired with exactly
 * one drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
        .name           = "drm",
        .owner          = THIS_MODULE,
        .init_fs_context = drm_fs_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
        struct inode *inode;
        int r;

        r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
        if (r < 0) {
                DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
                return ERR_PTR(r);
        }

        inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
        if (IS_ERR(inode))
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

        return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
        if (inode) {
                iput(inode);
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
        }
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component
 * helper library<component>`. For consistency and better options for code
 * reuse the following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register() (see the
 *   sketch in the comment below).
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
 */
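
/*
 * A minimal sketch of the bind-order recommendation above, assuming a
 * hypothetical component-based driver that embeds &struct drm_device the
 * same way as the earlier probe() example:
 *
 *      static int driver_master_bind(struct device *dev)
 *      {
 *              struct driver_device *priv;
 *              int ret;
 *
 *              priv = devm_drm_dev_alloc(dev, &driver_drm_driver,
 *                                        struct driver_device, drm);
 *              if (IS_ERR(priv))
 *                      return PTR_ERR(priv);
 *
 *              ret = component_bind_all(dev, &priv->drm);
 *              if (ret)
 *                      return ret;
 *
 *              return drm_dev_register(&priv->drm, 0);
 *      }
 */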

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
        drm_legacy_ctxbitmap_cleanup(dev);
        drm_legacy_remove_map_hash(dev);
        drm_fs_inode_free(dev->anon_inode);

        put_device(dev->dev);
        /* Prevent use-after-free in drm_managed_release when debugging is
         * enabled. Slightly awkward, but can't really be helped. */
        dev->dev = NULL;
        mutex_destroy(&dev->master_mutex);
        mutex_destroy(&dev->clientlist_mutex);
        mutex_destroy(&dev->filelist_mutex);
        mutex_destroy(&dev->struct_mutex);
        drm_legacy_destroy_members(dev);
}

static int drm_dev_init(struct drm_device *dev,
                        const struct drm_driver *driver,
                        struct device *parent)
{
        int ret;

        if (!drm_core_init_complete) {
                DRM_ERROR("DRM core is not initialized\n");
                return -ENODEV;
        }

        if (WARN_ON(!parent))
                return -EINVAL;

        kref_init(&dev->ref);
        dev->dev = get_device(parent);
        dev->driver = driver;

        INIT_LIST_HEAD(&dev->managed.resources);
        spin_lock_init(&dev->managed.lock);

        /* no per-device feature limits by default */
        dev->driver_features = ~0u;

        drm_legacy_init_members(dev);
        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->filelist_internal);
        INIT_LIST_HEAD(&dev->clientlist);
        INIT_LIST_HEAD(&dev->vblank_event_list);

        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->filelist_mutex);
        mutex_init(&dev->clientlist_mutex);
        mutex_init(&dev->master_mutex);

        ret = drmm_add_action(dev, drm_dev_init_release, NULL);
        if (ret)
                return ret;

        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode)) {
                ret = PTR_ERR(dev->anon_inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
                goto err;
        }

        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                if (ret)
                        goto err;
        }

        ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err;

        ret = drm_legacy_create_map_hash(dev);
        if (ret)
                goto err;

        drm_legacy_ctxbitmap_init(dev);

        if (drm_core_check_feature(dev, DRIVER_GEM)) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err;
                }
        }

        ret = drm_dev_set_unique(dev, dev_name(parent));
        if (ret)
                goto err;

        return 0;

err:
        drm_managed_release(dev);

        return ret;
}

static void devm_drm_dev_init_release(void *data)
{
        drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
                             struct drm_device *dev,
                             const struct drm_driver *driver)
{
        int ret;

        ret = drm_dev_init(dev, driver, parent);
        if (ret)
                return ret;

        return devm_add_action_or_reset(parent,
                                        devm_drm_dev_init_release, dev);
}

void *__devm_drm_dev_alloc(struct device *parent,
                           const struct drm_driver *driver,
                           size_t size, size_t offset)
{
        void *container;
        struct drm_device *drm;
        int ret;

        container = kzalloc(size, GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        drm = container + offset;
        ret = devm_drm_dev_init(parent, drm, driver);
        if (ret) {
                kfree(container);
                return ERR_PTR(ret);
        }
        drmm_add_final_kfree(drm, container);

        return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
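
/*
 * A minimal sketch, assuming a hypothetical driver-private resource, of
 * hooking a cleanup callback into the same managed-release machinery that
 * drm_dev_init() uses above (the callback signature matches
 * drm_dev_init_release()):
 *
 *      static void driver_release_foo(struct drm_device *drm, void *data)
 *      {
 *              // undo whatever was set up for "data"
 *      }
 *
 *      ret = drmm_add_action_or_reset(drm, driver_release_foo, foo);
 *      if (ret)
 *              return ret;
 */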

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(); it does not support
 * subclassing through embedding &struct drm_device in a driver private
 * structure, nor automatic cleanup through devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
                                 struct device *parent)
{
        struct drm_device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        ret = drm_dev_init(dev, driver, parent);
        if (ret) {
                kfree(dev);
                return ERR_PTR(ret);
        }

        drmm_add_final_kfree(dev, dev);

        return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
        struct drm_device *dev = container_of(ref, struct drm_device, ref);

        if (dev->driver->release)
                dev->driver->release(dev);

        drm_managed_release(dev);

        kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
        if (dev)
                kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
        if (dev)
                kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static int create_compat_control_link(struct drm_device *dev)
{
        struct drm_minor *minor;
        char *name;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
        if (!minor)
                return 0;

        /*
         * Some existing userspace out there uses the existence of the
         * controlD* sysfs files to figure out whether it's a modeset driver.
         * It only does readdir, hence a symlink is sufficient (and the least
         * confusing option). Otherwise controlD* is entirely unused.
         *
         * Old controlD chardevs have been allocated in the range 64-127.
         */
        name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return -ENOMEM;

        ret = sysfs_create_link(minor->kdev->kobj.parent,
                                &minor->kdev->kobj,
                                name);

        kfree(name);

        return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
        struct drm_minor *minor;
        char *name;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
        if (!minor)
                return;

        name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return;

        sysfs_remove_link(minor->kdev->kobj.parent, name);

        kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must have been initialized via
 * drm_dev_init() previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
        const struct drm_driver *driver = dev->driver;
        int ret;

        if (!driver->load)
                drm_mode_config_validate(dev);

        WARN_ON(!dev->managed.final_kfree);

        if (drm_dev_needs_global_mutex(dev))
                mutex_lock(&drm_global_mutex);

        ret = drm_minor_register(dev, DRM_MINOR_RENDER);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err_minors;

        ret = create_compat_control_link(dev);
        if (ret)
                goto err_minors;

        dev->registered = true;

        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
                        goto err_minors;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_register_all(dev);

        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver->name, driver->major, driver->minor,
                 driver->patchlevel, driver->date,
                 dev->dev ? dev_name(dev->dev) : "virtual device",
                 dev->primary->index);

        goto out_unlock;

err_minors:
        remove_compat_control_link(dev);
        drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
        if (drm_dev_needs_global_mutex(dev))
                mutex_unlock(&drm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference.
 *
 * A special form of unregistering for hotpluggable devices is
 * drm_dev_unplug(), which can be called while there are still open users of
 * @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_LEGACY))
                drm_lastclose(dev);

        dev->registered = false;

        drm_client_dev_unregister(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);

        if (dev->driver->unload)
                dev->driver->unload(dev);

        drm_legacy_pci_agp_destroy(dev);
        drm_legacy_rmmaps(dev);

        remove_compat_control_link(dev);
        drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @name: unique name
 *
 * Sets the unique name of a DRM device using the specified string. This is
 * already done by drm_dev_init(); drivers should only override the default
 * unique name for backwards compatibility reasons.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
        drmm_kfree(dev, dev->unique);
        dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);

        return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
        const struct file_operations *new_fops;
        struct drm_minor *minor;
        int err;

        DRM_DEBUG("\n");

        minor = drm_minor_acquire(iminor(inode));
        if (IS_ERR(minor))
                return PTR_ERR(minor);

        new_fops = fops_get(minor->dev->driver->fops);
        if (!new_fops) {
                err = -ENODEV;
                goto out;
        }

        replace_fops(filp, new_fops);
        if (filp->f_op->open)
                err = filp->f_op->open(inode, filp);
        else
                err = 0;

out:
        drm_minor_release(minor);

        return err;
}

static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
        .open = drm_stub_open,
        .llseek = noop_llseek,
};

static void drm_core_exit(void)
{
        unregister_chrdev(DRM_MAJOR, "drm");
        debugfs_remove(drm_debugfs_root);
        drm_sysfs_destroy();
        idr_destroy(&drm_minors_idr);
        drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
        int ret;

        drm_connector_ida_init();
        idr_init(&drm_minors_idr);
        drm_memcpy_init_early();

        ret = drm_sysfs_init();
        if (ret < 0) {
                DRM_ERROR("Cannot create DRM class: %d\n", ret);
                goto error;
        }

        drm_debugfs_root = debugfs_create_dir("dri", NULL);

        ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
        if (ret < 0)
                goto error;

        drm_core_init_complete = true;

        DRM_DEBUG("Initialized\n");
        return 0;

error:
        drm_core_exit();
        return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);