/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/xarray.h>

#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_machine.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_XARRAY_ALLOC(drm_minors_xa);

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             enum drm_minor_type type)
{
        switch (type) {
        case DRM_MINOR_PRIMARY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
        case DRM_MINOR_ACCEL:
                return &dev->accel;
        default:
                BUG();
        }
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
        struct drm_minor *minor = data;

        WARN_ON(dev != minor->dev);

        put_device(minor->kdev);

        if (minor->type == DRM_MINOR_ACCEL)
                accel_minor_remove(minor->index);
        else
                xa_erase(&drm_minors_xa, minor->index);
}

#define DRM_MINOR_LIMIT(t) ({ typeof(t) _t = (t); XA_LIMIT(64 * _t, 64 * _t + 63); })

static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
        struct drm_minor *minor;
        int index, r;

        minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;

        minor->type = type;
        minor->dev = dev;

        if (type == DRM_MINOR_ACCEL) {
                r = accel_minor_alloc();
                index = r;
        } else {
                r = xa_alloc(&drm_minors_xa, &index, NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
        }

        if (r < 0)
                return r;

        minor->index = index;

        r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
        if (r)
                return r;

        minor->kdev = drm_sysfs_minor_alloc(minor);
        if (IS_ERR(minor->kdev))
                return PTR_ERR(minor->kdev);

        *drm_minor_get_slot(dev, type) = minor;
        return 0;
}

static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
        struct drm_minor *minor;
        void *entry;
        int ret;

        DRM_DEBUG("\n");

        minor = *drm_minor_get_slot(dev, type);
        if (!minor)
                return 0;

        if (minor->type == DRM_MINOR_ACCEL) {
                accel_debugfs_init(minor, minor->index);
        } else {
                ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
                if (ret) {
                        DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                        goto err_debugfs;
                }
        }

        ret = device_add(minor->kdev);
        if (ret)
                goto err_debugfs;

        /* replace NULL with @minor so lookups will succeed from now on */
        if (minor->type == DRM_MINOR_ACCEL) {
                accel_minor_replace(minor, minor->index);
        } else {
                entry = xa_store(&drm_minors_xa, minor->index, minor, GFP_KERNEL);
                if (xa_is_err(entry)) {
                        ret = xa_err(entry);
                        goto err_debugfs;
                }
                WARN_ON(entry);
        }

        DRM_DEBUG("new minor registered %d\n", minor->index);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(minor);
        return ret;
}

static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
        struct drm_minor *minor;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !device_is_registered(minor->kdev))
                return;

        /* replace @minor with NULL so lookups will fail from now on */
        if (minor->type == DRM_MINOR_ACCEL)
                accel_minor_replace(NULL, minor->index);
        else
                xa_store(&drm_minors_xa, minor->index, NULL, GFP_KERNEL);

        device_del(minor->kdev);
        dev_set_drvdata(minor->kdev, NULL); /* safety belt */
        drm_debugfs_cleanup(minor);
}
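/*
 * The helpers above follow a reserve/publish pattern on drm_minors_xa:
 * drm_minor_alloc() reserves an index by storing a NULL entry,
 * drm_minor_register() publishes the minor with xa_store() and
 * drm_minor_unregister() unpublishes it by storing NULL again, while the
 * index stays reserved until drm_minor_alloc_release() erases it. The
 * following stand-alone sketch (illustration only, all identifiers are made
 * up) shows the same xarray pattern in isolation:
 *
 * .. code-block:: c
 *
 *      static DEFINE_XARRAY_ALLOC(example_xa);
 *
 *      static int example_publish(struct example_obj *obj)
 *      {
 *              u32 id;
 *              int ret;
 *
 *              // reserve an ID in [0, 63] without making it visible yet
 *              ret = xa_alloc(&example_xa, &id, NULL, XA_LIMIT(0, 63), GFP_KERNEL);
 *              if (ret < 0)
 *                      return ret;
 *              obj->id = id;
 *
 *              // further setup that may still fail goes here
 *
 *              // publish: xa_load() lookups succeed from now on
 *              return xa_err(xa_store(&example_xa, id, obj, GFP_KERNEL));
 *      }
 *
 *      static void example_unpublish(struct example_obj *obj)
 *      {
 *              // hide the object from lookups, but keep the ID reserved
 *              xa_store(&example_xa, obj->id, NULL, GFP_KERNEL);
 *      }
 *
 *      static void example_release(struct example_obj *obj)
 *      {
 *              // final teardown: give the ID back
 *              xa_erase(&example_xa, obj->id);
 *      }
 */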
/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;

        xa_lock(&drm_minors_xa);
        minor = xa_load(&drm_minors_xa, minor_id);
        if (minor)
                drm_dev_get(minor->dev);
        xa_unlock(&drm_minors_xa);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_dev_is_unplugged(minor->dev)) {
                drm_dev_put(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
        drm_dev_put(minor->dev);
}
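/*
 * Typical usage of the pair above, as seen in drm_stub_open() below and in
 * the DRM file code: acquire the minor for a char-dev minor number, use it,
 * then release it again. This is an illustrative sketch only; error handling
 * beyond the lookup is omitted:
 *
 * .. code-block:: c
 *
 *      struct drm_minor *minor;
 *
 *      minor = drm_minor_acquire(iminor(inode));
 *      if (IS_ERR(minor))
 *              return PTR_ERR(minor);
 *
 *      // minor->dev is guaranteed to stay valid here, although the
 *      // device may be unplugged concurrently
 *
 *      drm_minor_release(minor);
 */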
/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device.
 * This is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like
 * memory management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical &struct device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *      struct driver_device {
 *              struct drm_device drm;
 *              void *userspace_facing;
 *              struct clk *pclk;
 *      };
 *
 *      static const struct drm_driver driver_drm_driver = {
 *              [...]
 *      };
 *
 *      static int driver_probe(struct platform_device *pdev)
 *      {
 *              struct driver_device *priv;
 *              struct drm_device *drm;
 *              int ret;
 *
 *              priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *                                        struct driver_device, drm);
 *              if (IS_ERR(priv))
 *                      return PTR_ERR(priv);
 *              drm = &priv->drm;
 *
 *              ret = drmm_mode_config_init(drm);
 *              if (ret)
 *                      return ret;
 *
 *              priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *              if (!priv->userspace_facing)
 *                      return -ENOMEM;
 *
 *              priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *              if (IS_ERR(priv->pclk))
 *                      return PTR_ERR(priv->pclk);
 *
 *              // Further setup, display pipeline etc
 *
 *              platform_set_drvdata(pdev, drm);
 *
 *              drm_mode_config_reset(drm);
 *
 *              ret = drm_dev_register(drm, 0);
 *              if (ret)
 *                      return ret;
 *
 *              drm_fbdev_generic_setup(drm, 32);
 *
 *              return 0;
 *      }
 *
 *      // This function is called before the devm_ resources are released
 *      static int driver_remove(struct platform_device *pdev)
 *      {
 *              struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *              drm_dev_unregister(drm);
 *              drm_atomic_helper_shutdown(drm);
 *
 *              return 0;
 *      }
 *
 *      // This function is called on kernel restart and shutdown
 *      static void driver_shutdown(struct platform_device *pdev)
 *      {
 *              drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *      }
 *
 *      static int __maybe_unused driver_pm_suspend(struct device *dev)
 *      {
 *              return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *      }
 *
 *      static int __maybe_unused driver_pm_resume(struct device *dev)
 *      {
 *              drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *              return 0;
 *      }
 *
 *      static const struct dev_pm_ops driver_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *      };
 *
 *      static struct platform_driver driver_driver = {
 *              .driver = {
 *                      [...]
 *                      .pm = &driver_pm_ops,
 *              },
 *              .probe = driver_probe,
 *              .remove = driver_remove,
 *              .shutdown = driver_shutdown,
 *      };
 *      module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload)
 * should use drm_dev_unplug() instead of drm_dev_unregister(). The driver must
 * protect regions that are accessing device resources to prevent use after
 * they're released. This is done using drm_dev_enter() and drm_dev_exit().
 * There is one shortcoming, however: drm_dev_unplug() marks the drm_device as
 * unplugged before drm_atomic_helper_shutdown() is called. This means that if
 * the disable code paths are protected, they will not run on regular driver
 * module unload, possibly leaving the hardware enabled.
 */
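/*
 * For the hotplug case described above, a minimal disconnect path might look
 * like the sketch below. This is an illustration only (the USB glue and the
 * driver_* names are made up); the key point is that drm_dev_unplug()
 * replaces drm_dev_unregister() and that any later device access must be
 * guarded with drm_dev_enter()/drm_dev_exit():
 *
 * .. code-block:: c
 *
 *      static void driver_usb_disconnect(struct usb_interface *interface)
 *      {
 *              struct drm_device *drm = usb_get_intfdata(interface);
 *
 *              // Make the device inaccessible to userspace and mark it
 *              // unplugged; open files may still hold references.
 *              drm_dev_unplug(drm);
 *              drm_atomic_helper_shutdown(drm);
 *      }
 */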
/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely.  Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_dev_unregister(dev);
        drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
        *idx = srcu_read_lock(&drm_unplug_srcu);

        if (dev->unplugged) {
                srcu_read_unlock(&drm_unplug_srcu, *idx);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
        srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
        /*
         * After synchronizing any critical read section is guaranteed to see
         * the new value of ->unplugged, and any critical section which might
         * still have seen the old value of ->unplugged is guaranteed to have
         * finished.
         */
        dev->unplugged = true;
        synchronize_srcu(&drm_unplug_srcu);

        drm_dev_unregister(dev);

        /* Clear all CPU mappings pointing to this device */
        unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
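/*
 * A section protected against the unplug above typically looks like the
 * sketch below (illustration only; driver_hw_write() and its argument are
 * made up):
 *
 * .. code-block:: c
 *
 *      int driver_do_something(struct drm_device *drm)
 *      {
 *              int idx;
 *
 *              if (!drm_dev_enter(drm, &idx))
 *                      return -ENODEV; // device is gone, bail out
 *
 *              driver_hw_write(drm);   // safe to touch device resources here
 *
 *              drm_dev_exit(idx);
 *
 *              return 0;
 *      }
 */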
/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not
 * allow stand-alone address_space objects, so we need an underlying inode. As
 * there is no way to allocate an independent inode easily, we need a fake
 * internal VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references
 * to the inode. But each drm_fs_inode_new() call must be paired with exactly
 * one drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
        .name = "drm",
        .owner = THIS_MODULE,
        .init_fs_context = drm_fs_init_fs_context,
        .kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
        struct inode *inode;
        int r;

        r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
        if (r < 0) {
                DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
                return ERR_PTR(r);
        }

        inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
        if (IS_ERR(inode))
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

        return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
        if (inode) {
                iput(inode);
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
        }
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component
 * helper library<component>`. For consistency and better options for code
 * reuse the following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register() (see the
 *   sketch following this section).
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
 */
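/*
 * An illustrative master bind callback following the recommendations above
 * (sketch only; the driver_* names, the embedding driver_device structure and
 * the drm_driver instance are made up):
 *
 * .. code-block:: c
 *
 *      static int driver_master_bind(struct device *dev)
 *      {
 *              struct driver_device *priv;
 *              struct drm_device *drm;
 *              int ret;
 *
 *              priv = devm_drm_dev_alloc(dev, &driver_drm_driver,
 *                                        struct driver_device, drm);
 *              if (IS_ERR(priv))
 *                      return PTR_ERR(priv);
 *              drm = &priv->drm;
 *
 *              // Pass the &drm_device, not a private structure, to the
 *              // component bind callbacks.
 *              ret = component_bind_all(dev, drm);
 *              if (ret)
 *                      return ret;
 *
 *              // Further setup: mode config, outputs, ...
 *
 *              ret = drm_dev_register(drm, 0);
 *              if (ret)
 *                      component_unbind_all(dev, drm);
 *
 *              return ret;
 *      }
 */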
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
        drm_legacy_ctxbitmap_cleanup(dev);
        drm_legacy_remove_map_hash(dev);
        drm_fs_inode_free(dev->anon_inode);

        put_device(dev->dev);
        /* Prevent use-after-free in drm_managed_release when debugging is
         * enabled. Slightly awkward, but can't really be helped. */
        dev->dev = NULL;
        mutex_destroy(&dev->master_mutex);
        mutex_destroy(&dev->clientlist_mutex);
        mutex_destroy(&dev->filelist_mutex);
        mutex_destroy(&dev->struct_mutex);
        mutex_destroy(&dev->debugfs_mutex);
        drm_legacy_destroy_members(dev);
}

static int drm_dev_init(struct drm_device *dev,
                        const struct drm_driver *driver,
                        struct device *parent)
{
        struct inode *inode;
        int ret;

        if (!drm_core_init_complete) {
                DRM_ERROR("DRM core is not initialized\n");
                return -ENODEV;
        }

        if (WARN_ON(!parent))
                return -EINVAL;

        kref_init(&dev->ref);
        dev->dev = get_device(parent);
        dev->driver = driver;

        INIT_LIST_HEAD(&dev->managed.resources);
        spin_lock_init(&dev->managed.lock);

        /* no per-device feature limits by default */
        dev->driver_features = ~0u;

        if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
            (drm_core_check_feature(dev, DRIVER_RENDER) ||
             drm_core_check_feature(dev, DRIVER_MODESET))) {
                DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
                return -EINVAL;
        }

        drm_legacy_init_members(dev);
        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->filelist_internal);
        INIT_LIST_HEAD(&dev->clientlist);
        INIT_LIST_HEAD(&dev->vblank_event_list);
        INIT_LIST_HEAD(&dev->debugfs_list);

        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->filelist_mutex);
        mutex_init(&dev->clientlist_mutex);
        mutex_init(&dev->master_mutex);
        mutex_init(&dev->debugfs_mutex);

        ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
        if (ret)
                return ret;

        inode = drm_fs_inode_new();
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
                goto err;
        }

        dev->anon_inode = inode;

        if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
                if (ret)
                        goto err;
        } else {
                if (drm_core_check_feature(dev, DRIVER_RENDER)) {
                        ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                        if (ret)
                                goto err;
                }

                ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
                if (ret)
                        goto err;
        }

        ret = drm_legacy_create_map_hash(dev);
        if (ret)
                goto err;

        drm_legacy_ctxbitmap_init(dev);

        if (drm_core_check_feature(dev, DRIVER_GEM)) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err;
                }
        }

        dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
        if (!dev->unique) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        drm_managed_release(dev);

        return ret;
}

static void devm_drm_dev_init_release(void *data)
{
        drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
                             struct drm_device *dev,
                             const struct drm_driver *driver)
{
        int ret;

        ret = drm_dev_init(dev, driver, parent);
        if (ret)
                return ret;

        return devm_add_action_or_reset(parent,
                                        devm_drm_dev_init_release, dev);
}
void *__devm_drm_dev_alloc(struct device *parent,
                           const struct drm_driver *driver,
                           size_t size, size_t offset)
{
        void *container;
        struct drm_device *drm;
        int ret;

        container = kzalloc(size, GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        drm = container + offset;
        ret = devm_drm_dev_init(parent, drm, driver);
        if (ret) {
                kfree(container);
                return ERR_PTR(ret);
        }
        drmm_add_final_kfree(drm, container);

        return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(), which does not
 * support subclassing through embedding the &struct drm_device in a driver
 * private structure, and which does not support automatic cleanup through
 * devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
                                 struct device *parent)
{
        struct drm_device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        ret = drm_dev_init(dev, driver, parent);
        if (ret) {
                kfree(dev);
                return ERR_PTR(ret);
        }

        drmm_add_final_kfree(dev, dev);

        return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
        struct drm_device *dev = container_of(ref, struct drm_device, ref);

        if (dev->driver->release)
                dev->driver->release(dev);

        drm_managed_release(dev);

        kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
        if (dev)
                kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
        if (dev)
                kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);
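/*
 * With drm_dev_alloc() the driver owns the final reference itself, so the
 * teardown path has to drop it explicitly (with devm_drm_dev_alloc() a devres
 * action does this automatically). A minimal sketch of the deprecated, manual
 * flow, assuming a made-up driver_drm_driver and with error handling trimmed
 * down:
 *
 * .. code-block:: c
 *
 *      struct drm_device *drm;
 *      int ret;
 *
 *      drm = drm_dev_alloc(&driver_drm_driver, parent);
 *      if (IS_ERR(drm))
 *              return PTR_ERR(drm);
 *
 *      ret = drm_dev_register(drm, 0);
 *      if (ret) {
 *              drm_dev_put(drm);
 *              return ret;
 *      }
 *
 *      // ... later, on driver unbind ...
 *
 *      drm_dev_unregister(drm);
 *      drm_dev_put(drm);       // drops the final reference
 */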
static int create_compat_control_link(struct drm_device *dev)
{
        struct drm_minor *minor;
        char *name;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
        if (!minor)
                return 0;

        /*
         * Some existing userspace out there uses the existence of the
         * controlD* sysfs files to figure out whether it's a modeset driver.
         * It only does readdir, hence a symlink is sufficient (and the least
         * confusing option). Otherwise controlD* is entirely unused.
         *
         * Old controlD chardevs have been allocated in the range
         * 64-127.
         */
        name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return -ENOMEM;

        ret = sysfs_create_link(minor->kdev->kobj.parent,
                                &minor->kdev->kobj,
                                name);

        kfree(name);

        return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
        struct drm_minor *minor;
        char *name;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
        if (!minor)
                return;

        name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return;

        sysfs_remove_link(minor->kdev->kobj.parent, name);

        kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be initialized via
 * drm_dev_init() previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
        const struct drm_driver *driver = dev->driver;
        int ret;

        if (!driver->load)
                drm_mode_config_validate(dev);

        WARN_ON(!dev->managed.final_kfree);

        if (drm_dev_needs_global_mutex(dev))
                mutex_lock(&drm_global_mutex);

        ret = drm_minor_register(dev, DRM_MINOR_RENDER);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
        if (ret)
                goto err_minors;

        ret = create_compat_control_link(dev);
        if (ret)
                goto err_minors;

        dev->registered = true;

        if (driver->load) {
                ret = driver->load(dev, flags);
                if (ret)
                        goto err_minors;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = drm_modeset_register_all(dev);
                if (ret)
                        goto err_unload;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver->name, driver->major, driver->minor,
                 driver->patchlevel, driver->date,
                 dev->dev ? dev_name(dev->dev) : "virtual device",
                 dev->primary ? dev->primary->index : dev->accel->index);

        goto out_unlock;

err_unload:
        if (dev->driver->unload)
                dev->driver->unload(dev);
err_minors:
        remove_compat_control_link(dev);
        drm_minor_unregister(dev, DRM_MINOR_ACCEL);
        drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
        if (drm_dev_needs_global_mutex(dev))
                mutex_unlock(&drm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_dev_register);
/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference, unless it is managed with
 * devres (as devices allocated with devm_drm_dev_alloc() are), in which case
 * there is already an unwind action registered.
 *
 * A special form of unregistering for hotpluggable devices is
 * drm_dev_unplug(), which can be called while there are still open users of
 * @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_LEGACY))
                drm_lastclose(dev);

        dev->registered = false;

        drm_client_dev_unregister(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);

        if (dev->driver->unload)
                dev->driver->unload(dev);

        drm_legacy_pci_agp_destroy(dev);
        drm_legacy_rmmaps(dev);

        remove_compat_control_link(dev);
        drm_minor_unregister(dev, DRM_MINOR_ACCEL);
        drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from
 * DRM core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */
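/*
 * The stub fops below only implement ->open(); drm_stub_open() swaps in the
 * file_operations provided by the driver that owns the minor, typically
 * defined with one of the DEFINE_DRM_*_FOPS() helpers. An illustrative sketch
 * (driver_fops and driver_drm_driver are made-up names):
 *
 * .. code-block:: c
 *
 *      DEFINE_DRM_GEM_FOPS(driver_fops);
 *
 *      static const struct drm_driver driver_drm_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *              .fops = &driver_fops,
 *              // ...
 *      };
 */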
static int drm_stub_open(struct inode *inode, struct file *filp)
{
        const struct file_operations *new_fops;
        struct drm_minor *minor;
        int err;

        DRM_DEBUG("\n");

        minor = drm_minor_acquire(iminor(inode));
        if (IS_ERR(minor))
                return PTR_ERR(minor);

        new_fops = fops_get(minor->dev->driver->fops);
        if (!new_fops) {
                err = -ENODEV;
                goto out;
        }

        replace_fops(filp, new_fops);
        if (filp->f_op->open)
                err = filp->f_op->open(inode, filp);
        else
                err = 0;

out:
        drm_minor_release(minor);

        return err;
}

static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
        .open = drm_stub_open,
        .llseek = noop_llseek,
};

static void drm_core_exit(void)
{
        drm_privacy_screen_lookup_exit();
        accel_core_exit();
        unregister_chrdev(DRM_MAJOR, "drm");
        debugfs_remove(drm_debugfs_root);
        drm_sysfs_destroy();
        WARN_ON(!xa_empty(&drm_minors_xa));
        drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
        int ret;

        drm_connector_ida_init();
        drm_memcpy_init_early();

        ret = drm_sysfs_init();
        if (ret < 0) {
                DRM_ERROR("Cannot create DRM class: %d\n", ret);
                goto error;
        }

        drm_debugfs_root = debugfs_create_dir("dri", NULL);

        ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
        if (ret < 0)
                goto error;

        ret = accel_core_init();
        if (ret < 0)
                goto error;

        drm_privacy_screen_lookup_init();

        drm_core_init_complete = true;

        DRM_DEBUG("Initialized\n");
        return 0;

error:
        drm_core_exit();
        return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);