121d70354SDave Airlie /*
221d70354SDave Airlie * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
321d70354SDave Airlie *
421d70354SDave Airlie * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
521d70354SDave Airlie * All Rights Reserved.
621d70354SDave Airlie *
721d70354SDave Airlie * Author Rickard E. (Rik) Faith <faith@valinux.com>
821d70354SDave Airlie *
921d70354SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining a
1021d70354SDave Airlie * copy of this software and associated documentation files (the "Software"),
1121d70354SDave Airlie * to deal in the Software without restriction, including without limitation
1221d70354SDave Airlie * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1321d70354SDave Airlie * and/or sell copies of the Software, and to permit persons to whom the
1421d70354SDave Airlie * Software is furnished to do so, subject to the following conditions:
1521d70354SDave Airlie *
1621d70354SDave Airlie * The above copyright notice and this permission notice (including the next
1721d70354SDave Airlie * paragraph) shall be included in all copies or substantial portions of the
1821d70354SDave Airlie * Software.
1921d70354SDave Airlie *
2021d70354SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2121d70354SDave Airlie * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2221d70354SDave Airlie * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
2321d70354SDave Airlie * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
2421d70354SDave Airlie * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2521d70354SDave Airlie * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2621d70354SDave Airlie * DEALINGS IN THE SOFTWARE.
2721d70354SDave Airlie */
2821d70354SDave Airlie
2921d70354SDave Airlie #include <linux/debugfs.h>
3021d70354SDave Airlie #include <linux/fs.h>
3121d70354SDave Airlie #include <linux/module.h>
3221d70354SDave Airlie #include <linux/moduleparam.h>
3321d70354SDave Airlie #include <linux/mount.h>
344a457910SDavid Howells #include <linux/pseudo_fs.h>
3521d70354SDave Airlie #include <linux/slab.h>
36bee330f3SNoralf Trønnes #include <linux/srcu.h>
3785e634bcSDaniel Vetter
387428ff70SOded Gabbay #include <drm/drm_accel.h>
39b7e32befSThomas Hellström #include <drm/drm_cache.h>
40c76f0f7cSNoralf Trønnes #include <drm/drm_client.h>
410500c04eSSam Ravnborg #include <drm/drm_color_mgmt.h>
4285e634bcSDaniel Vetter #include <drm/drm_drv.h>
430500c04eSSam Ravnborg #include <drm/drm_file.h>
446f365e56SDaniel Vetter #include <drm/drm_managed.h>
450500c04eSSam Ravnborg #include <drm/drm_mode_object.h>
460500c04eSSam Ravnborg #include <drm/drm_print.h>
47a1a98689SHans de Goede #include <drm/drm_privacy_screen_machine.h>
4885e634bcSDaniel Vetter
4979190ea2SBenjamin Gaignard #include "drm_crtc_internal.h"
5067d0ec4eSDaniel Vetter #include "drm_internal.h"
510500c04eSSam Ravnborg #include "drm_legacy.h"
5221d70354SDave Airlie
5382d5e73fSDavid Herrmann MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
5482d5e73fSDavid Herrmann MODULE_DESCRIPTION("DRM shared core routines");
5521d70354SDave Airlie MODULE_LICENSE("GPL and additional rights");
5621d70354SDave Airlie
5721d70354SDave Airlie static DEFINE_SPINLOCK(drm_minor_lock);
5821d70354SDave Airlie static struct idr drm_minors_idr;
5921d70354SDave Airlie
60371c2279SAlexandru Moise /*
61371c2279SAlexandru Moise * If the drm core fails to init for whatever reason,
62371c2279SAlexandru Moise * we should prevent any drivers from registering with it.
63371c2279SAlexandru Moise * It's best to check this at drm_dev_init(), as some drivers
64371c2279SAlexandru Moise * prefer to embed struct drm_device into their own device
65371c2279SAlexandru Moise * structure and call drm_dev_init() themselves.
66371c2279SAlexandru Moise */
67c00697b5STian Tao static bool drm_core_init_complete;
68371c2279SAlexandru Moise
6921d70354SDave Airlie static struct dentry *drm_debugfs_root;
7021d70354SDave Airlie
71bee330f3SNoralf Trønnes DEFINE_STATIC_SRCU(drm_unplug_srcu);
72bee330f3SNoralf Trønnes
7321d70354SDave Airlie /*
7421d70354SDave Airlie * DRM Minors
7521d70354SDave Airlie * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
7621d70354SDave Airlie * of them is represented by a drm_minor object. Depending on the capabilities
7721d70354SDave Airlie * of the device-driver, different interfaces are registered.
7821d70354SDave Airlie *
7921d70354SDave Airlie * Minors can be accessed via dev->$minor_name. This pointer is either
8021d70354SDave Airlie * NULL or a valid drm_minor pointer and stays valid as long as the device is
8121d70354SDave Airlie * valid. This means, DRM minors have the same life-time as the underlying
8221d70354SDave Airlie * device. However, this doesn't mean that the minor is active. Minors are
8321d70354SDave Airlie * registered and unregistered dynamically according to device-state.
8421d70354SDave Airlie */
8521d70354SDave Airlie
/*
 * Return the address of the slot on @dev that stores the minor of the given
 * @type. The slot itself may hold NULL if the minor was never allocated.
 */
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     enum drm_minor_type type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_ACCEL:
		return &dev->accel;
	default:
		/* Callers only ever pass one of the three types above. */
		BUG();
	}
}
10021d70354SDave Airlie
/*
 * drmm_ release action paired with drm_minor_alloc(): drops the sysfs device
 * reference and returns the minor's index to its allocator. Runs on final
 * drm_dev_put().
 */
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	/* Accel minors use their own index allocator; all others the idr. */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_remove(minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_remove(&drm_minors_idr, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}
}
118f96306f9SDaniel Vetter
/*
 * Allocate a minor of the given @type for @dev and store it in the matching
 * slot. The minor is not visible to lookups yet; that happens in
 * drm_minor_register(). All allocations are managed and torn down via
 * drm_minor_alloc_release() on final drm_dev_put().
 *
 * Returns 0 on success or a negative error code.
 */
static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	if (type == DRM_MINOR_ACCEL) {
		r = accel_minor_alloc();
	} else {
		/*
		 * Reserve an index in the per-type range
		 * [64 * type, 64 * (type + 1)). The entry stays NULL so
		 * drm_minor_acquire() fails until registration.
		 */
		spin_lock_irqsave(&drm_minor_lock, flags);
		r = idr_alloc(&drm_minors_idr,
			      NULL,
			      64 * type,
			      64 * (type + 1),
			      GFP_NOWAIT);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	/* From here on the index and kdev are cleaned up by the drmm action. */
	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}
16221d70354SDave Airlie
/*
 * Register the previously allocated minor of the given @type: set up debugfs,
 * add the char-dev and finally publish the minor so drm_minor_acquire() can
 * find it. A missing minor (slot is NULL) is not an error — the device simply
 * does not provide that interface.
 *
 * Returns 0 on success or a negative error code.
 */
static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	if (minor->type == DRM_MINOR_ACCEL) {
		accel_debugfs_init(minor, minor->index);
	} else {
		ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
		if (ret) {
			DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
			goto err_debugfs;
		}
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_replace(minor, minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_replace(&drm_minors_idr, minor, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}
20521d70354SDave Airlie
/*
 * Undo drm_minor_register(): hide the minor from lookups first, then remove
 * the char-dev and its debugfs entries. Safe to call if the minor was never
 * allocated or never registered.
 */
static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_replace(NULL, minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_replace(&drm_minors_idr, NULL, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
22821d70354SDave Airlie
22985e634bcSDaniel Vetter /*
23021d70354SDave Airlie * Looks up the given minor-ID and returns the respective DRM-minor object. The
23121d70354SDave Airlie * reference-count of the underlying device is increased so you must release this
23221d70354SDave Airlie * object with drm_minor_release().
23321d70354SDave Airlie *
23421d70354SDave Airlie * As long as you hold this minor, it is guaranteed that the object and the
23521d70354SDave Airlie * minor->dev pointer will stay valid! However, the device may get unplugged and
23621d70354SDave Airlie * unregistered while you hold the minor.
23721d70354SDave Airlie */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	/*
	 * Look up and take the device reference under the lock, so the entry
	 * cannot be replaced with NULL by drm_minor_unregister() in between.
	 */
	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		/* Device already unplugged: drop the reference we just took. */
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}
25821d70354SDave Airlie
/* Drop the device reference taken by drm_minor_acquire(). */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
26321d70354SDave Airlie
26421d70354SDave Airlie /**
2656e3f797cSDaniel Vetter * DOC: driver instance overview
2666e3f797cSDaniel Vetter *
267ea0dd85aSDaniel Vetter * A device instance for a drm driver is represented by &struct drm_device. This
2684c8e84b8SDaniel Vetter * is allocated and initialized with devm_drm_dev_alloc(), usually from
2694c8e84b8SDaniel Vetter * bus-specific ->probe() callbacks implemented by the driver. The driver then
2704c8e84b8SDaniel Vetter * needs to initialize all the various subsystems for the drm device like memory
2714c8e84b8SDaniel Vetter * management, vblank handling, modesetting support and initial output
2724c8e84b8SDaniel Vetter * configuration plus obviously initialize all the corresponding hardware bits.
2734c8e84b8SDaniel Vetter * Finally when everything is up and running and ready for userspace the device
2744c8e84b8SDaniel Vetter * instance can be published using drm_dev_register().
2756e3f797cSDaniel Vetter *
2760ae865efSCai Huoqing * There is also deprecated support for initializing device instances using
277ef40cbf9SDaniel Vetter * bus-specific helpers and the &drm_driver.load callback. But due to
2786e3f797cSDaniel Vetter * backwards-compatibility needs the device instance has to be published too
2796e3f797cSDaniel Vetter * early, which requires unpretty global locking to make safe and is therefore
2806e3f797cSDaniel Vetter * only supported for existing drivers not yet converted to the new scheme.
2816e3f797cSDaniel Vetter *
2826e3f797cSDaniel Vetter * When cleaning up a device instance everything needs to be done in reverse:
2836e3f797cSDaniel Vetter * First unpublish the device instance with drm_dev_unregister(). Then clean up
2846e3f797cSDaniel Vetter * any other resources allocated at device initialization and drop the driver's
2859a96f550SAishwarya Pant * reference to &drm_device using drm_dev_put().
2866e3f797cSDaniel Vetter *
2879e1ed9fbSDaniel Vetter * Note that any allocation or resource which is visible to userspace must be
2889e1ed9fbSDaniel Vetter * released only when the final drm_dev_put() is called, and not when the
2899e1ed9fbSDaniel Vetter * driver is unbound from the underlying physical struct &device. Best to use
2909e1ed9fbSDaniel Vetter * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
2919e1ed9fbSDaniel Vetter * related functions.
2929e1ed9fbSDaniel Vetter *
2939e1ed9fbSDaniel Vetter * devres managed resources like devm_kmalloc() can only be used for resources
2949e1ed9fbSDaniel Vetter * directly related to the underlying hardware device, and only used in code
2959e1ed9fbSDaniel Vetter * paths fully protected by drm_dev_enter() and drm_dev_exit().
296de99f060SNoralf Trønnes *
297de99f060SNoralf Trønnes * Display driver example
298de99f060SNoralf Trønnes * ~~~~~~~~~~~~~~~~~~~~~~
299de99f060SNoralf Trønnes *
300de99f060SNoralf Trønnes * The following example shows a typical structure of a DRM display driver.
301de99f060SNoralf Trønnes * The example focuses on the probe() function and the other functions that are
3024c8e84b8SDaniel Vetter * almost always present and serve as a demonstration of devm_drm_dev_alloc().
303de99f060SNoralf Trønnes *
304de99f060SNoralf Trønnes * .. code-block:: c
305de99f060SNoralf Trønnes *
306de99f060SNoralf Trønnes * struct driver_device {
307de99f060SNoralf Trønnes * struct drm_device drm;
308de99f060SNoralf Trønnes * void *userspace_facing;
309de99f060SNoralf Trønnes * struct clk *pclk;
310de99f060SNoralf Trønnes * };
311de99f060SNoralf Trønnes *
3128f5c7aa0SDaniel Vetter * static const struct drm_driver driver_drm_driver = {
313de99f060SNoralf Trønnes * [...]
314de99f060SNoralf Trønnes * };
315de99f060SNoralf Trønnes *
316de99f060SNoralf Trønnes * static int driver_probe(struct platform_device *pdev)
317de99f060SNoralf Trønnes * {
318de99f060SNoralf Trønnes * struct driver_device *priv;
319de99f060SNoralf Trønnes * struct drm_device *drm;
320de99f060SNoralf Trønnes * int ret;
321de99f060SNoralf Trønnes *
3224c8e84b8SDaniel Vetter * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
3234c8e84b8SDaniel Vetter * struct driver_device, drm);
3244c8e84b8SDaniel Vetter * if (IS_ERR(priv))
3254c8e84b8SDaniel Vetter * return PTR_ERR(priv);
326de99f060SNoralf Trønnes * drm = &priv->drm;
327de99f060SNoralf Trønnes *
328c3b790eaSDaniel Vetter * ret = drmm_mode_config_init(drm);
329c3b790eaSDaniel Vetter * if (ret)
330c3b790eaSDaniel Vetter * return ret;
331de99f060SNoralf Trønnes *
3325dad34f3SDaniel Vetter * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
333de99f060SNoralf Trønnes * if (!priv->userspace_facing)
334de99f060SNoralf Trønnes * return -ENOMEM;
335de99f060SNoralf Trønnes *
336de99f060SNoralf Trønnes * priv->pclk = devm_clk_get(dev, "PCLK");
337de99f060SNoralf Trønnes * if (IS_ERR(priv->pclk))
338de99f060SNoralf Trønnes * return PTR_ERR(priv->pclk);
339de99f060SNoralf Trønnes *
34056d8d641SJonathan Neuschäfer * // Further setup, display pipeline etc
341de99f060SNoralf Trønnes *
342de99f060SNoralf Trønnes * platform_set_drvdata(pdev, drm);
343de99f060SNoralf Trønnes *
344de99f060SNoralf Trønnes * drm_mode_config_reset(drm);
345de99f060SNoralf Trønnes *
346de99f060SNoralf Trønnes * ret = drm_dev_register(drm);
347de99f060SNoralf Trønnes * if (ret)
348de99f060SNoralf Trønnes * return ret;
349de99f060SNoralf Trønnes *
350de99f060SNoralf Trønnes * drm_fbdev_generic_setup(drm, 32);
351de99f060SNoralf Trønnes *
352de99f060SNoralf Trønnes * return 0;
353de99f060SNoralf Trønnes * }
354de99f060SNoralf Trønnes *
35556d8d641SJonathan Neuschäfer * // This function is called before the devm_ resources are released
356de99f060SNoralf Trønnes * static int driver_remove(struct platform_device *pdev)
357de99f060SNoralf Trønnes * {
358de99f060SNoralf Trønnes * struct drm_device *drm = platform_get_drvdata(pdev);
359de99f060SNoralf Trønnes *
360de99f060SNoralf Trønnes * drm_dev_unregister(drm);
361de99f060SNoralf Trønnes * drm_atomic_helper_shutdown(drm)
362de99f060SNoralf Trønnes *
363de99f060SNoralf Trønnes * return 0;
364de99f060SNoralf Trønnes * }
365de99f060SNoralf Trønnes *
36656d8d641SJonathan Neuschäfer * // This function is called on kernel restart and shutdown
367de99f060SNoralf Trønnes * static void driver_shutdown(struct platform_device *pdev)
368de99f060SNoralf Trønnes * {
369de99f060SNoralf Trønnes * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
370de99f060SNoralf Trønnes * }
371de99f060SNoralf Trønnes *
372de99f060SNoralf Trønnes * static int __maybe_unused driver_pm_suspend(struct device *dev)
373de99f060SNoralf Trønnes * {
374de99f060SNoralf Trønnes * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
375de99f060SNoralf Trønnes * }
376de99f060SNoralf Trønnes *
377de99f060SNoralf Trønnes * static int __maybe_unused driver_pm_resume(struct device *dev)
378de99f060SNoralf Trønnes * {
379de99f060SNoralf Trønnes * drm_mode_config_helper_resume(dev_get_drvdata(dev));
380de99f060SNoralf Trønnes *
381de99f060SNoralf Trønnes * return 0;
382de99f060SNoralf Trønnes * }
383de99f060SNoralf Trønnes *
384de99f060SNoralf Trønnes * static const struct dev_pm_ops driver_pm_ops = {
385de99f060SNoralf Trønnes * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
386de99f060SNoralf Trønnes * };
387de99f060SNoralf Trønnes *
388de99f060SNoralf Trønnes * static struct platform_driver driver_driver = {
389de99f060SNoralf Trønnes * .driver = {
390de99f060SNoralf Trønnes * [...]
391de99f060SNoralf Trønnes * .pm = &driver_pm_ops,
392de99f060SNoralf Trønnes * },
393de99f060SNoralf Trønnes * .probe = driver_probe,
394de99f060SNoralf Trønnes * .remove = driver_remove,
395de99f060SNoralf Trønnes * .shutdown = driver_shutdown,
396de99f060SNoralf Trønnes * };
397de99f060SNoralf Trønnes * module_platform_driver(driver_driver);
398de99f060SNoralf Trønnes *
399de99f060SNoralf Trønnes * Drivers that want to support device unplugging (USB, DT overlay unload) should
400de99f060SNoralf Trønnes * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
401de99f060SNoralf Trønnes * regions that access device resources to prevent use after they're
402de99f060SNoralf Trønnes * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
403de99f060SNoralf Trønnes * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
404de99f060SNoralf Trønnes * drm_atomic_helper_shutdown() is called. This means that if the disable code
405de99f060SNoralf Trønnes * paths are protected, they will not run on regular driver module unload,
4060ae865efSCai Huoqing * possibly leaving the hardware enabled.
4076e3f797cSDaniel Vetter */
4086e3f797cSDaniel Vetter
4096e3f797cSDaniel Vetter /**
41021d70354SDave Airlie * drm_put_dev - Unregister and release a DRM device
41121d70354SDave Airlie * @dev: DRM device
41221d70354SDave Airlie *
41321d70354SDave Airlie * Called at module unload time or when a PCI device is unplugged.
41421d70354SDave Airlie *
41521d70354SDave Airlie * Cleans up the DRM device, calling drm_lastclose().
4166e3f797cSDaniel Vetter *
4176e3f797cSDaniel Vetter * Note: Use of this function is deprecated. It will eventually go away
4189a96f550SAishwarya Pant * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
4196e3f797cSDaniel Vetter * instead to make sure that the device isn't userspace accessible any more
4206e3f797cSDaniel Vetter * while teardown is in progress, ensuring that userspace can't access an
4216e3f797cSDaniel Vetter * inconsistent state.
42221d70354SDave Airlie */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev) {
		/* Tear down the userspace-visible interfaces first, then
		 * drop the driver's final reference to the device.
		 */
		drm_dev_unregister(dev);
		drm_dev_put(dev);
	} else {
		DRM_ERROR("cleanup called no dev\n");
	}
}
43521d70354SDave Airlie EXPORT_SYMBOL(drm_put_dev);
43621d70354SDave Airlie
437bee330f3SNoralf Trønnes /**
438bee330f3SNoralf Trønnes * drm_dev_enter - Enter device critical section
439bee330f3SNoralf Trønnes * @dev: DRM device
440bee330f3SNoralf Trønnes * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
441bee330f3SNoralf Trønnes *
442bee330f3SNoralf Trønnes * This function marks and protects the beginning of a section that should not
443bee330f3SNoralf Trønnes * be entered after the device has been unplugged. The section end is marked
444bee330f3SNoralf Trønnes * with drm_dev_exit(). Calls to this function can be nested.
445bee330f3SNoralf Trønnes *
446bee330f3SNoralf Trønnes * Returns:
447bee330f3SNoralf Trønnes * True if it is OK to enter the section, false otherwise.
448bee330f3SNoralf Trønnes */
drm_dev_enter(struct drm_device * dev,int * idx)449bee330f3SNoralf Trønnes bool drm_dev_enter(struct drm_device *dev, int *idx)
450c07dcd61SDaniel Vetter {
451bee330f3SNoralf Trønnes *idx = srcu_read_lock(&drm_unplug_srcu);
452bee330f3SNoralf Trønnes
453bee330f3SNoralf Trønnes if (dev->unplugged) {
454bee330f3SNoralf Trønnes srcu_read_unlock(&drm_unplug_srcu, *idx);
455bee330f3SNoralf Trønnes return false;
456c07dcd61SDaniel Vetter }
457c07dcd61SDaniel Vetter
458bee330f3SNoralf Trønnes return true;
459bee330f3SNoralf Trønnes }
460bee330f3SNoralf Trønnes EXPORT_SYMBOL(drm_dev_enter);
461bee330f3SNoralf Trønnes
462bee330f3SNoralf Trønnes /**
463bee330f3SNoralf Trønnes * drm_dev_exit - Exit device critical section
464bee330f3SNoralf Trønnes * @idx: index returned from drm_dev_enter()
465bee330f3SNoralf Trønnes *
466bee330f3SNoralf Trønnes * This function marks the end of a section that should not be entered after
467bee330f3SNoralf Trønnes * the device has been unplugged.
468bee330f3SNoralf Trønnes */
void drm_dev_exit(int idx)
{
	/* Close the SRCU read-side section opened by drm_dev_enter(). */
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
473bee330f3SNoralf Trønnes EXPORT_SYMBOL(drm_dev_exit);
474bee330f3SNoralf Trønnes
475c07dcd61SDaniel Vetter /**
476c07dcd61SDaniel Vetter * drm_dev_unplug - unplug a DRM device
477c07dcd61SDaniel Vetter * @dev: DRM device
478c07dcd61SDaniel Vetter *
479c07dcd61SDaniel Vetter * This unplugs a hotpluggable DRM device, which makes it inaccessible to
480bee330f3SNoralf Trønnes * userspace operations. Entry-points can use drm_dev_enter() and
481bee330f3SNoralf Trønnes * drm_dev_exit() to protect device resources in a race free manner. This
482c07dcd61SDaniel Vetter * essentially unregisters the device like drm_dev_unregister(), but can be
483c07dcd61SDaniel Vetter * called while there are still open users of @dev.
484c07dcd61SDaniel Vetter */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	/* Only unregister once no drm_dev_enter() section can run anymore. */
	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
501c07dcd61SDaniel Vetter EXPORT_SYMBOL(drm_dev_unplug);
50221d70354SDave Airlie
50321d70354SDave Airlie /*
50421d70354SDave Airlie * DRM internal mount
50521d70354SDave Airlie * We want to be able to allocate our own "struct address_space" to control
50621d70354SDave Airlie * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
50721d70354SDave Airlie * stand-alone address_space objects, so we need an underlying inode. As there
50821d70354SDave Airlie * is no way to allocate an independent inode easily, we need a fake internal
50921d70354SDave Airlie * VFS mount-point.
51021d70354SDave Airlie *
51121d70354SDave Airlie * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
51221d70354SDave Airlie * frees it again. You are allowed to use iget() and iput() to get references to
51321d70354SDave Airlie * the inode. But each drm_fs_inode_new() call must be paired with exactly one
51421d70354SDave Airlie * drm_fs_inode_free() call (which does not have to be the last iput()).
51521d70354SDave Airlie * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
51621d70354SDave Airlie * between multiple inode-users. You could, technically, call
51721d70354SDave Airlie * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
51821d70354SDave Airlie * iput(), but this way you'd end up with a new vfsmount for each inode.
51921d70354SDave Airlie */
52021d70354SDave Airlie
52121d70354SDave Airlie static int drm_fs_cnt;
52221d70354SDave Airlie static struct vfsmount *drm_fs_mnt;
52321d70354SDave Airlie
drm_fs_init_fs_context(struct fs_context * fc)5244a457910SDavid Howells static int drm_fs_init_fs_context(struct fs_context *fc)
52521d70354SDave Airlie {
5264a457910SDavid Howells return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
52721d70354SDave Airlie }
52821d70354SDave Airlie
52921d70354SDave Airlie static struct file_system_type drm_fs_type = {
53021d70354SDave Airlie .name = "drm",
53121d70354SDave Airlie .owner = THIS_MODULE,
5324a457910SDavid Howells .init_fs_context = drm_fs_init_fs_context,
53321d70354SDave Airlie .kill_sb = kill_anon_super,
53421d70354SDave Airlie };
53521d70354SDave Airlie
drm_fs_inode_new(void)53621d70354SDave Airlie static struct inode *drm_fs_inode_new(void)
53721d70354SDave Airlie {
53821d70354SDave Airlie struct inode *inode;
53921d70354SDave Airlie int r;
54021d70354SDave Airlie
54121d70354SDave Airlie r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
54221d70354SDave Airlie if (r < 0) {
54321d70354SDave Airlie DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
54421d70354SDave Airlie return ERR_PTR(r);
54521d70354SDave Airlie }
54621d70354SDave Airlie
54721d70354SDave Airlie inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
54821d70354SDave Airlie if (IS_ERR(inode))
54921d70354SDave Airlie simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
55021d70354SDave Airlie
55121d70354SDave Airlie return inode;
55221d70354SDave Airlie }
55321d70354SDave Airlie
drm_fs_inode_free(struct inode * inode)55421d70354SDave Airlie static void drm_fs_inode_free(struct inode *inode)
55521d70354SDave Airlie {
55621d70354SDave Airlie if (inode) {
55721d70354SDave Airlie iput(inode);
55821d70354SDave Airlie simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
55921d70354SDave Airlie }
56021d70354SDave Airlie }
56121d70354SDave Airlie
56221d70354SDave Airlie /**
56386ab67dfSDaniel Vetter * DOC: component helper usage recommendations
56486ab67dfSDaniel Vetter *
56586ab67dfSDaniel Vetter * DRM drivers that drive hardware where a logical device consists of a pile of
56686ab67dfSDaniel Vetter * independent hardware blocks are recommended to use the :ref:`component helper
56786ab67dfSDaniel Vetter * library<component>`. For consistency and better options for code reuse the
56886ab67dfSDaniel Vetter * following guidelines apply:
56986ab67dfSDaniel Vetter *
57086ab67dfSDaniel Vetter * - The entire device initialization procedure should be run from the
5714c8e84b8SDaniel Vetter * &component_master_ops.master_bind callback, starting with
5724c8e84b8SDaniel Vetter * devm_drm_dev_alloc(), then binding all components with
5734c8e84b8SDaniel Vetter * component_bind_all() and finishing with drm_dev_register().
57486ab67dfSDaniel Vetter *
57586ab67dfSDaniel Vetter * - The opaque pointer passed to all components through component_bind_all()
57686ab67dfSDaniel Vetter * should point at &struct drm_device of the device instance, not some driver
57786ab67dfSDaniel Vetter * specific private structure.
57886ab67dfSDaniel Vetter *
57986ab67dfSDaniel Vetter * - The component helper fills the niche where further standardization of
58086ab67dfSDaniel Vetter * interfaces is not practical. When there already is, or will be, a
58186ab67dfSDaniel Vetter * standardized interface like &drm_bridge or &drm_panel, providing its own
58286ab67dfSDaniel Vetter * functions to find such components at driver load time, like
58386ab67dfSDaniel Vetter * drm_of_find_panel_or_bridge(), then the component helper should not be
58486ab67dfSDaniel Vetter * used.
58586ab67dfSDaniel Vetter */
58686ab67dfSDaniel Vetter
/*
 * drmm-managed release action registered by drm_dev_init(). Runs when the
 * managed resources of the device are released and undoes everything
 * drm_dev_init() set up, in reverse order of initialization.
 */
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	/* Drop the parent-device reference taken in drm_dev_init(). */
	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	mutex_destroy(&dev->debugfs_mutex);
	drm_legacy_destroy_members(dev);
}
6042cbf7fc6SDaniel Vetter
/*
 * Initialize @dev for use with @driver under parent device @parent.
 *
 * On success a drmm release action (drm_dev_init_release()) is registered,
 * so all state set up before that point is torn down via the managed
 * resource machinery. On failure after that point, drm_managed_release()
 * unwinds everything acquired so far.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	/* Hold a reference on the parent for the drm_device's lifetime;
	 * dropped in drm_dev_init_release(). */
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	/* Compute-accel is mutually exclusive with render/modeset support. */
	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
	    (drm_core_check_feature(dev, DRIVER_RENDER) ||
	     drm_core_check_feature(dev, DRIVER_MODESET))) {
		DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
		return -EINVAL;
	}

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);
	INIT_LIST_HEAD(&dev->debugfs_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);
	mutex_init(&dev->debugfs_mutex);

	/* From here on, cleanup of the state above happens through the
	 * managed release action — both on the error path below and when
	 * the managed resources are finally released. */
	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	/* Accel devices get only the accel minor; graphics devices get a
	 * primary minor and, optionally, a render minor. */
	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
		if (ret)
			goto err;
	} else {
		if (drm_core_check_feature(dev, DRIVER_RENDER)) {
			ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
			if (ret)
				goto err;
		}

		ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
		if (ret)
			goto err;
	}

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	/* Default unique identifier is the parent device's name. */
	dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
	if (!dev->unique) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}
707b209aca3SChris Wilson
/* devres action: drop the initial devm-managed drm_device reference. */
static void devm_drm_dev_init_release(void *data)
{
	struct drm_device *dev = data;

	drm_dev_put(dev);
}
7129b1f1b6bSNoralf Trønnes
devm_drm_dev_init(struct device * parent,struct drm_device * dev,const struct drm_driver * driver)7134c8e84b8SDaniel Vetter static int devm_drm_dev_init(struct device *parent,
7149b1f1b6bSNoralf Trønnes struct drm_device *dev,
7158f5c7aa0SDaniel Vetter const struct drm_driver *driver)
7169b1f1b6bSNoralf Trønnes {
7179b1f1b6bSNoralf Trønnes int ret;
7189b1f1b6bSNoralf Trønnes
7199b1f1b6bSNoralf Trønnes ret = drm_dev_init(dev, driver, parent);
7209b1f1b6bSNoralf Trønnes if (ret)
7219b1f1b6bSNoralf Trønnes return ret;
7229b1f1b6bSNoralf Trønnes
72313283a24STian Tao return devm_add_action_or_reset(parent,
72413283a24STian Tao devm_drm_dev_init_release, dev);
7259b1f1b6bSNoralf Trønnes }
7269b1f1b6bSNoralf Trønnes
/*
 * Backend of the devm_drm_dev_alloc() macro: allocate a zeroed driver
 * structure of @size bytes embedding a struct drm_device at @offset,
 * initialize the embedded device for @driver, and tie its lifetime to
 * @parent via devres.
 *
 * Returns the container structure, or an ERR_PTR() on failure.
 */
void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	struct drm_device *drm;
	void *container;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}

	/* The whole container, not just the drm_device, is freed last. */
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);
750b0b5849eSDaniel Vetter
751b209aca3SChris Wilson /**
752b209aca3SChris Wilson * drm_dev_alloc - Allocate new DRM device
753b209aca3SChris Wilson * @driver: DRM driver to allocate device for
754b209aca3SChris Wilson * @parent: Parent device object
755b209aca3SChris Wilson *
7564c8e84b8SDaniel Vetter * This is the deprecated version of devm_drm_dev_alloc(), which does not support
7574c8e84b8SDaniel Vetter * subclassing through embedding the struct &drm_device in a driver private
7584c8e84b8SDaniel Vetter * structure, and which does not support automatic cleanup through devres.
759b209aca3SChris Wilson *
760b209aca3SChris Wilson * RETURNS:
7610f288605STom Gundersen * Pointer to new DRM device, or ERR_PTR on failure.
762b209aca3SChris Wilson */
drm_dev_alloc(const struct drm_driver * driver,struct device * parent)7638f5c7aa0SDaniel Vetter struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
764b209aca3SChris Wilson struct device *parent)
765b209aca3SChris Wilson {
766b209aca3SChris Wilson struct drm_device *dev;
767b209aca3SChris Wilson int ret;
768b209aca3SChris Wilson
769b209aca3SChris Wilson dev = kzalloc(sizeof(*dev), GFP_KERNEL);
770b209aca3SChris Wilson if (!dev)
7710f288605STom Gundersen return ERR_PTR(-ENOMEM);
772b209aca3SChris Wilson
773b209aca3SChris Wilson ret = drm_dev_init(dev, driver, parent);
774b209aca3SChris Wilson if (ret) {
77521d70354SDave Airlie kfree(dev);
7760f288605STom Gundersen return ERR_PTR(ret);
77721d70354SDave Airlie }
778b209aca3SChris Wilson
7796f365e56SDaniel Vetter drmm_add_final_kfree(dev, dev);
7806f365e56SDaniel Vetter
781b209aca3SChris Wilson return dev;
782b209aca3SChris Wilson }
78321d70354SDave Airlie EXPORT_SYMBOL(drm_dev_alloc);
78421d70354SDave Airlie
/* kref release callback: final teardown once the last reference is gone. */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	/* Optional driver hook runs before the managed cleanup actions. */
	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	/* Free the allocation registered via drmm_add_final_kfree(). */
	kfree(dev->managed.final_kfree);
}
79621d70354SDave Airlie
79721d70354SDave Airlie /**
7989a96f550SAishwarya Pant * drm_dev_get - Take reference of a DRM device
79921d70354SDave Airlie * @dev: device to take reference of or NULL
80021d70354SDave Airlie *
80121d70354SDave Airlie * This increases the ref-count of @dev by one. You *must* already own a
8029a96f550SAishwarya Pant * reference when calling this. Use drm_dev_put() to drop this reference
80321d70354SDave Airlie * again.
80421d70354SDave Airlie *
80521d70354SDave Airlie * This function never fails. However, this function does not provide *any*
80621d70354SDave Airlie * guarantee whether the device is alive or running. It only provides a
80721d70354SDave Airlie * reference to the object and the memory associated with it.
80821d70354SDave Airlie */
drm_dev_get(struct drm_device * dev)8099a96f550SAishwarya Pant void drm_dev_get(struct drm_device *dev)
81021d70354SDave Airlie {
81121d70354SDave Airlie if (dev)
81221d70354SDave Airlie kref_get(&dev->ref);
81321d70354SDave Airlie }
8149a96f550SAishwarya Pant EXPORT_SYMBOL(drm_dev_get);
81521d70354SDave Airlie
81621d70354SDave Airlie /**
8179a96f550SAishwarya Pant * drm_dev_put - Drop reference of a DRM device
81821d70354SDave Airlie * @dev: device to drop reference of or NULL
81921d70354SDave Airlie *
82021d70354SDave Airlie * This decreases the ref-count of @dev by one. The device is destroyed if the
82121d70354SDave Airlie * ref-count drops to zero.
82221d70354SDave Airlie */
drm_dev_put(struct drm_device * dev)8239a96f550SAishwarya Pant void drm_dev_put(struct drm_device *dev)
82421d70354SDave Airlie {
82521d70354SDave Airlie if (dev)
82621d70354SDave Airlie kref_put(&dev->ref, drm_dev_release);
82721d70354SDave Airlie }
8289a96f550SAishwarya Pant EXPORT_SYMBOL(drm_dev_put);
8299a96f550SAishwarya Pant
create_compat_control_link(struct drm_device * dev)8306449b088SDaniel Vetter static int create_compat_control_link(struct drm_device *dev)
8316449b088SDaniel Vetter {
8326449b088SDaniel Vetter struct drm_minor *minor;
8336449b088SDaniel Vetter char *name;
8346449b088SDaniel Vetter int ret;
8356449b088SDaniel Vetter
8366449b088SDaniel Vetter if (!drm_core_check_feature(dev, DRIVER_MODESET))
8376449b088SDaniel Vetter return 0;
8386449b088SDaniel Vetter
8396449b088SDaniel Vetter minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
8406449b088SDaniel Vetter if (!minor)
8416449b088SDaniel Vetter return 0;
8426449b088SDaniel Vetter
8436449b088SDaniel Vetter /*
8446449b088SDaniel Vetter * Some existing userspace out there uses the existing of the controlD*
8456449b088SDaniel Vetter * sysfs files to figure out whether it's a modeset driver. It only does
8466449b088SDaniel Vetter * readdir, hence a symlink is sufficient (and the least confusing
8476449b088SDaniel Vetter * option). Otherwise controlD* is entirely unused.
8486449b088SDaniel Vetter *
8496449b088SDaniel Vetter * Old controlD chardev have been allocated in the range
8506449b088SDaniel Vetter * 64-127.
8516449b088SDaniel Vetter */
8526449b088SDaniel Vetter name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
8536449b088SDaniel Vetter if (!name)
8546449b088SDaniel Vetter return -ENOMEM;
8556449b088SDaniel Vetter
8566449b088SDaniel Vetter ret = sysfs_create_link(minor->kdev->kobj.parent,
8576449b088SDaniel Vetter &minor->kdev->kobj,
8586449b088SDaniel Vetter name);
8596449b088SDaniel Vetter
8606449b088SDaniel Vetter kfree(name);
8616449b088SDaniel Vetter
8626449b088SDaniel Vetter return ret;
8636449b088SDaniel Vetter }
8646449b088SDaniel Vetter
remove_compat_control_link(struct drm_device * dev)8656449b088SDaniel Vetter static void remove_compat_control_link(struct drm_device *dev)
8666449b088SDaniel Vetter {
8676449b088SDaniel Vetter struct drm_minor *minor;
8686449b088SDaniel Vetter char *name;
8696449b088SDaniel Vetter
8706449b088SDaniel Vetter if (!drm_core_check_feature(dev, DRIVER_MODESET))
8716449b088SDaniel Vetter return;
8726449b088SDaniel Vetter
8736449b088SDaniel Vetter minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
8746449b088SDaniel Vetter if (!minor)
8756449b088SDaniel Vetter return;
8766449b088SDaniel Vetter
8777f6df440SHaneen Mohammed name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
8786449b088SDaniel Vetter if (!name)
8796449b088SDaniel Vetter return;
8806449b088SDaniel Vetter
8816449b088SDaniel Vetter sysfs_remove_link(minor->kdev->kobj.parent, name);
8826449b088SDaniel Vetter
8836449b088SDaniel Vetter kfree(name);
8846449b088SDaniel Vetter }
8856449b088SDaniel Vetter
88621d70354SDave Airlie /**
88721d70354SDave Airlie * drm_dev_register - Register DRM device
88821d70354SDave Airlie * @dev: Device to register
88921d70354SDave Airlie * @flags: Flags passed to the driver's .load() function
89021d70354SDave Airlie *
89121d70354SDave Airlie * Register the DRM device @dev with the system, advertise device to user-space
89233e70110SDaniel Vetter * and start normal device operation. @dev must be initialized via drm_dev_init()
893e28cd4d0SChris Wilson * previously.
89421d70354SDave Airlie *
89521d70354SDave Airlie * Never call this twice on any device!
89621d70354SDave Airlie *
8976e3f797cSDaniel Vetter * NOTE: To ensure backward compatibility with existing drivers method this
898ef40cbf9SDaniel Vetter * function calls the &drm_driver.load method after registering the device
899ef40cbf9SDaniel Vetter * nodes, creating race conditions. Usage of the &drm_driver.load methods is
900ef40cbf9SDaniel Vetter * therefore deprecated, drivers must perform all initialization before calling
9016e3f797cSDaniel Vetter * drm_dev_register().
9026e3f797cSDaniel Vetter *
90321d70354SDave Airlie * RETURNS:
90421d70354SDave Airlie * 0 on success, negative error code on failure.
90521d70354SDave Airlie */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	/* Drivers using the deprecated ->load() hook finish initialization
	 * after registration, so the mode config can only be validated up
	 * front for modern drivers. */
	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	/* Deprecated: the device nodes are already visible at this point,
	 * so the load callback races against userspace opens (see the
	 * kerneldoc NOTE above). */
	if (driver->load) {
		ret = driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_modeset_register_all(dev);
		if (ret)
			goto err_unload;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary ? dev->primary->index : dev->accel->index);

	goto out_unlock;

	/* Error unwind: reverse order of the registration steps above. */
err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
97121d70354SDave Airlie
97221d70354SDave Airlie /**
97321d70354SDave Airlie * drm_dev_unregister - Unregister DRM device
97421d70354SDave Airlie * @dev: Device to unregister
97521d70354SDave Airlie *
97621d70354SDave Airlie * Unregister the DRM device from the system. This does the reverse of
97721d70354SDave Airlie * drm_dev_register() but does not deallocate the device. The caller must call
9786a98a6e4SBrandon Pollack * drm_dev_put() to drop their final reference, unless it is managed with devres
9796a98a6e4SBrandon Pollack * (as devices allocated with devm_drm_dev_alloc() are), in which case there is
9806a98a6e4SBrandon Pollack * already an unwind action registered.
9816e3f797cSDaniel Vetter *
982c07dcd61SDaniel Vetter * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
983c07dcd61SDaniel Vetter * which can be called while there are still open users of @dev.
984c07dcd61SDaniel Vetter *
9856e3f797cSDaniel Vetter * This should be called first in the device teardown code to make sure
9866e3f797cSDaniel Vetter * userspace can't access the device instance any more.
98721d70354SDave Airlie */
void drm_dev_unregister(struct drm_device *dev)
{
	/* Legacy (non-KMS) drivers rely on lastclose for final cleanup. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	/* Clear the registered flag first so further access from userspace
	 * can be rejected while teardown proceeds. */
	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_legacy_pci_agp_destroy(dev);
	drm_legacy_rmmaps(dev);

	/* Reverse of the registration steps in drm_dev_register(). */
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);
101221d70354SDave Airlie
101321d70354SDave Airlie /*
101421d70354SDave Airlie * DRM Core
101521d70354SDave Airlie * The DRM core module initializes all global DRM objects and makes them
101621d70354SDave Airlie * available to drivers. Once setup, drivers can probe their respective
101721d70354SDave Airlie * devices.
101821d70354SDave Airlie * Currently, core management includes:
101921d70354SDave Airlie * - The "DRM-Global" key/value database
102021d70354SDave Airlie * - Global ID management for connectors
102121d70354SDave Airlie * - DRM major number allocation
102221d70354SDave Airlie * - DRM minor management
102321d70354SDave Airlie * - DRM sysfs class
102421d70354SDave Airlie * - DRM debugfs root
102521d70354SDave Airlie *
102621d70354SDave Airlie * Furthermore, the DRM core provides dynamic char-dev lookups. For each
102721d70354SDave Airlie * interface registered on a DRM device, you can request minor numbers from DRM
102821d70354SDave Airlie * core. DRM core takes care of major-number management and char-dev
102921d70354SDave Airlie * registration. A stub ->open() callback forwards any open() requests to the
103021d70354SDave Airlie * registered minor.
103121d70354SDave Airlie */
103221d70354SDave Airlie
/*
 * Stub ->open() for the DRM major: resolve the minor being opened, swap the
 * file's fops for the owning driver's file_operations, and forward the
 * open() call to them.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	/* From here on, every file operation goes to the driver's fops. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}
106221d70354SDave Airlie
/* fops for the DRM major device node: only open() is provided, which
 * redirects to the per-minor driver fops via drm_stub_open(). */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
106821d70354SDave Airlie
/* Tear down global DRM core state, in reverse order of drm_core_init().
 * NOTE(review): also used as drm_core_init()'s error-unwind path, where it
 * runs against partially initialized state — presumably the callees cope
 * with that; confirm before reordering. */
static void drm_core_exit(void)
{
	drm_privacy_screen_lookup_exit();
	accel_core_exit();
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
10792cc107dcSDavid Herrmann
/*
 * Module init: set up the global DRM core state (connector IDA, minors idr,
 * sysfs class, debugfs root, DRM char-dev major, accel subsystem and
 * privacy-screen lookup). Any failure funnels through drm_core_exit() to
 * unwind what was already set up.
 */
static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);
	drm_memcpy_init_early();

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	ret = accel_core_init();
	if (ret < 0)
		goto error;

	drm_privacy_screen_lookup_init();

	/* Gate: drm_dev_init() refuses to run until this is set. */
	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}
111521d70354SDave Airlie
111621d70354SDave Airlie module_init(drm_core_init);
111721d70354SDave Airlie module_exit(drm_core_exit);
1118