xref: /openbmc/linux/drivers/gpu/drm/drm_file.c (revision 2cde325e)
19acdac68SDaniel Vetter /*
29acdac68SDaniel Vetter  * \author Rickard E. (Rik) Faith <faith@valinux.com>
39acdac68SDaniel Vetter  * \author Daryll Strauss <daryll@valinux.com>
49acdac68SDaniel Vetter  * \author Gareth Hughes <gareth@valinux.com>
59acdac68SDaniel Vetter  */
69acdac68SDaniel Vetter 
79acdac68SDaniel Vetter /*
89acdac68SDaniel Vetter  * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
99acdac68SDaniel Vetter  *
109acdac68SDaniel Vetter  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
119acdac68SDaniel Vetter  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
129acdac68SDaniel Vetter  * All Rights Reserved.
139acdac68SDaniel Vetter  *
149acdac68SDaniel Vetter  * Permission is hereby granted, free of charge, to any person obtaining a
159acdac68SDaniel Vetter  * copy of this software and associated documentation files (the "Software"),
169acdac68SDaniel Vetter  * to deal in the Software without restriction, including without limitation
179acdac68SDaniel Vetter  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
189acdac68SDaniel Vetter  * and/or sell copies of the Software, and to permit persons to whom the
199acdac68SDaniel Vetter  * Software is furnished to do so, subject to the following conditions:
209acdac68SDaniel Vetter  *
219acdac68SDaniel Vetter  * The above copyright notice and this permission notice (including the next
229acdac68SDaniel Vetter  * paragraph) shall be included in all copies or substantial portions of the
239acdac68SDaniel Vetter  * Software.
249acdac68SDaniel Vetter  *
259acdac68SDaniel Vetter  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
269acdac68SDaniel Vetter  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
279acdac68SDaniel Vetter  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
289acdac68SDaniel Vetter  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
299acdac68SDaniel Vetter  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
309acdac68SDaniel Vetter  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
319acdac68SDaniel Vetter  * OTHER DEALINGS IN THE SOFTWARE.
329acdac68SDaniel Vetter  */
339acdac68SDaniel Vetter 
344748aa16SChris Wilson #include <linux/anon_inodes.h>
350500c04eSSam Ravnborg #include <linux/dma-fence.h>
364748aa16SChris Wilson #include <linux/file.h>
370500c04eSSam Ravnborg #include <linux/module.h>
380500c04eSSam Ravnborg #include <linux/pci.h>
399acdac68SDaniel Vetter #include <linux/poll.h>
409acdac68SDaniel Vetter #include <linux/slab.h>
41a8f8b1d9SDaniel Vetter 
42c76f0f7cSNoralf Trønnes #include <drm/drm_client.h>
430500c04eSSam Ravnborg #include <drm/drm_drv.h>
44a8f8b1d9SDaniel Vetter #include <drm/drm_file.h>
45686b21b5SRob Clark #include <drm/drm_gem.h>
460500c04eSSam Ravnborg #include <drm/drm_print.h>
47a8f8b1d9SDaniel Vetter 
489acdac68SDaniel Vetter #include "drm_crtc_internal.h"
490500c04eSSam Ravnborg #include "drm_internal.h"
500500c04eSSam Ravnborg #include "drm_legacy.h"
519acdac68SDaniel Vetter 
529acdac68SDaniel Vetter /* from BKL pushdown */
539acdac68SDaniel Vetter DEFINE_MUTEX(drm_global_mutex);
549acdac68SDaniel Vetter 
554017ad7bSDaniel Vetter bool drm_dev_needs_global_mutex(struct drm_device *dev)
564017ad7bSDaniel Vetter {
574017ad7bSDaniel Vetter 	/*
584017ad7bSDaniel Vetter 	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
594017ad7bSDaniel Vetter 	 * bother. They also still need BKL locking for their ioctls, so better
604017ad7bSDaniel Vetter 	 * safe than sorry.
614017ad7bSDaniel Vetter 	 */
624017ad7bSDaniel Vetter 	if (drm_core_check_feature(dev, DRIVER_LEGACY))
634017ad7bSDaniel Vetter 		return true;
644017ad7bSDaniel Vetter 
654017ad7bSDaniel Vetter 	/*
664017ad7bSDaniel Vetter 	 * The deprecated ->load callback must be called after the driver is
674017ad7bSDaniel Vetter 	 * already registered. This means such drivers rely on the BKL to make
684017ad7bSDaniel Vetter 	 * sure an open can't proceed until the driver is actually fully set up.
694017ad7bSDaniel Vetter 	 * Similar hilarity holds for the unload callback.
704017ad7bSDaniel Vetter 	 */
714017ad7bSDaniel Vetter 	if (dev->driver->load || dev->driver->unload)
724017ad7bSDaniel Vetter 		return true;
734017ad7bSDaniel Vetter 
744017ad7bSDaniel Vetter 	/*
754017ad7bSDaniel Vetter 	 * Drivers with the lastclose callback assume that it's synchronized
764017ad7bSDaniel Vetter 	 * against concurrent opens, which again needs the BKL. The proper fix
774017ad7bSDaniel Vetter 	 * is to use the drm_client infrastructure with proper locking for each
784017ad7bSDaniel Vetter 	 * client.
794017ad7bSDaniel Vetter 	 */
804017ad7bSDaniel Vetter 	if (dev->driver->lastclose)
814017ad7bSDaniel Vetter 		return true;
824017ad7bSDaniel Vetter 
834017ad7bSDaniel Vetter 	return false;
844017ad7bSDaniel Vetter }
854017ad7bSDaniel Vetter 
869acdac68SDaniel Vetter /**
879acdac68SDaniel Vetter  * DOC: file operations
889acdac68SDaniel Vetter  *
899acdac68SDaniel Vetter  * Drivers must define the file operations structure that forms the DRM
909acdac68SDaniel Vetter  * userspace API entry point, even though most of those operations are
91b93658f8SDaniel Vetter  * implemented in the DRM core. The resulting &struct file_operations must be
92b93658f8SDaniel Vetter  * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
939acdac68SDaniel Vetter  * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
94b93658f8SDaniel Vetter  * Note that drm_compat_ioctl() will be NULL if CONFIG_COMPAT=n, so there's no
95b93658f8SDaniel Vetter  * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
96b93658f8SDaniel Vetter  * that require 32/64 bit compatibility support must provide their own
97b93658f8SDaniel Vetter  * &file_operations.compat_ioctl handler that processes private ioctls and calls
98b93658f8SDaniel Vetter  * drm_compat_ioctl() for core ioctls.
999acdac68SDaniel Vetter  *
1009acdac68SDaniel Vetter  * In addition drm_read() and drm_poll() provide support for DRM events. DRM
1019acdac68SDaniel Vetter  * events are a generic and extensible means to send asynchronous events to
1029acdac68SDaniel Vetter  * userspace through the file descriptor. They are used to send vblank events and
1039acdac68SDaniel Vetter  * page flip completions by the KMS API. But drivers can also use them for their
1049acdac68SDaniel Vetter  * own needs, e.g. to signal completion of rendering.
1059acdac68SDaniel Vetter  *
106b93658f8SDaniel Vetter  * For the driver-side event interface see drm_event_reserve_init() and
107b93658f8SDaniel Vetter  * drm_send_event() as the main starting points.
108b93658f8SDaniel Vetter  *
1099acdac68SDaniel Vetter  * The memory mapping implementation will vary depending on how the driver
1109acdac68SDaniel Vetter  * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
1119acdac68SDaniel Vetter  * function, modern drivers should use one of the provided memory-manager
112f5ca8eb6SThomas Zimmermann  * specific implementations. For GEM-based drivers this is drm_gem_mmap().
1139acdac68SDaniel Vetter  *
1149acdac68SDaniel Vetter  * No other file operations are supported by the DRM userspace API. Overall the
115bb2eaba6SDaniel Vetter  * following is an example &file_operations structure::
1169acdac68SDaniel Vetter  *
1179acdac68SDaniel Vetter  *     static const struct file_operations example_drm_fops = {
1189acdac68SDaniel Vetter  *             .owner = THIS_MODULE,
1199acdac68SDaniel Vetter  *             .open = drm_open,
1209acdac68SDaniel Vetter  *             .release = drm_release,
1219acdac68SDaniel Vetter  *             .unlocked_ioctl = drm_ioctl,
1229acdac68SDaniel Vetter  *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
1239acdac68SDaniel Vetter  *             .poll = drm_poll,
1249acdac68SDaniel Vetter  *             .read = drm_read,
1259acdac68SDaniel Vetter  *             .llseek = no_llseek,
1269acdac68SDaniel Vetter  *             .mmap = drm_gem_mmap,
1279acdac68SDaniel Vetter  *     };
128b93658f8SDaniel Vetter  *
129f42e1819SDaniel Vetter  * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
1304a83c26aSDanilo Krummrich  * DMA based drivers there is the DEFINE_DRM_GEM_DMA_FOPS() macro to make this
131f42e1819SDaniel Vetter  * simpler.
132bb2eaba6SDaniel Vetter  *
133bb2eaba6SDaniel Vetter  * The driver's &file_operations must be stored in &drm_driver.fops.
134bb2eaba6SDaniel Vetter  *
135bb2eaba6SDaniel Vetter  * For driver-private IOCTL handling see the more detailed discussion in
136bb2eaba6SDaniel Vetter  * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
1379acdac68SDaniel Vetter  */
1389acdac68SDaniel Vetter 
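/*
 * A minimal sketch of how a GEM-based driver typically wires this up (the
 * "example_" names below are illustrative only, not part of the DRM core):
 *
 *     DEFINE_DRM_GEM_FOPS(example_fops);
 *
 *     static const struct drm_driver example_driver = {
 *             .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *             .fops = &example_fops,
 *             .name = "example",
 *             ...
 *     };
 *
 * DEFINE_DRM_GEM_FOPS() expands to a &struct file_operations populated with
 * the drm_open(), drm_release(), drm_ioctl(), drm_read(), drm_poll() and
 * drm_gem_mmap() helpers described above, so most drivers never spell the
 * structure out by hand.
 */
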
1391572042aSDavid Herrmann /**
1401572042aSDavid Herrmann  * drm_file_alloc - allocate file context
1411572042aSDavid Herrmann  * @minor: minor to allocate on
1421572042aSDavid Herrmann  *
1431572042aSDavid Herrmann  * This allocates a new DRM file context. It is not linked into any context and
1441572042aSDavid Herrmann  * can be used by the caller freely. Note that the context keeps a pointer to
1451572042aSDavid Herrmann  * @minor, so it must be freed before @minor is.
1461572042aSDavid Herrmann  *
1471572042aSDavid Herrmann  * RETURNS:
1481572042aSDavid Herrmann  * Pointer to newly allocated context, ERR_PTR on failure.
1491572042aSDavid Herrmann  */
1501572042aSDavid Herrmann struct drm_file *drm_file_alloc(struct drm_minor *minor)
1511572042aSDavid Herrmann {
1523f09a0cdSRob Clark 	static atomic64_t ident = ATOMIC_INIT(0);
1531572042aSDavid Herrmann 	struct drm_device *dev = minor->dev;
1541572042aSDavid Herrmann 	struct drm_file *file;
1551572042aSDavid Herrmann 	int ret;
1561572042aSDavid Herrmann 
1571572042aSDavid Herrmann 	file = kzalloc(sizeof(*file), GFP_KERNEL);
1581572042aSDavid Herrmann 	if (!file)
1591572042aSDavid Herrmann 		return ERR_PTR(-ENOMEM);
1601572042aSDavid Herrmann 
1613f09a0cdSRob Clark 	/* Get a unique identifier for fdinfo: */
1623f09a0cdSRob Clark 	file->client_id = atomic64_inc_return(&ident);
163031ddd28STvrtko Ursulin 	rcu_assign_pointer(file->pid, get_pid(task_tgid(current)));
1641572042aSDavid Herrmann 	file->minor = minor;
1651572042aSDavid Herrmann 
1661572042aSDavid Herrmann 	/* for compatibility root is always authenticated */
1671572042aSDavid Herrmann 	file->authenticated = capable(CAP_SYS_ADMIN);
1681572042aSDavid Herrmann 
1691572042aSDavid Herrmann 	INIT_LIST_HEAD(&file->lhead);
1701572042aSDavid Herrmann 	INIT_LIST_HEAD(&file->fbs);
1711572042aSDavid Herrmann 	mutex_init(&file->fbs_lock);
1721572042aSDavid Herrmann 	INIT_LIST_HEAD(&file->blobs);
1731572042aSDavid Herrmann 	INIT_LIST_HEAD(&file->pending_event_list);
1741572042aSDavid Herrmann 	INIT_LIST_HEAD(&file->event_list);
1751572042aSDavid Herrmann 	init_waitqueue_head(&file->event_wait);
1761572042aSDavid Herrmann 	file->event_space = 4096; /* set aside 4k for event buffer */
1771572042aSDavid Herrmann 
1780b0860a3SDesmond Cheong Zhi Xi 	spin_lock_init(&file->master_lookup_lock);
1791572042aSDavid Herrmann 	mutex_init(&file->event_read_lock);
1801572042aSDavid Herrmann 
1811572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_GEM))
1821572042aSDavid Herrmann 		drm_gem_open(dev, file);
1831572042aSDavid Herrmann 
1841572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1851572042aSDavid Herrmann 		drm_syncobj_open(file);
1861572042aSDavid Herrmann 
1871572042aSDavid Herrmann 	drm_prime_init_file_private(&file->prime);
1881572042aSDavid Herrmann 
1891572042aSDavid Herrmann 	if (dev->driver->open) {
1901572042aSDavid Herrmann 		ret = dev->driver->open(dev, file);
1911572042aSDavid Herrmann 		if (ret < 0)
1921572042aSDavid Herrmann 			goto out_prime_destroy;
1931572042aSDavid Herrmann 	}
1941572042aSDavid Herrmann 
1951572042aSDavid Herrmann 	return file;
1961572042aSDavid Herrmann 
1971572042aSDavid Herrmann out_prime_destroy:
1981572042aSDavid Herrmann 	drm_prime_destroy_file_private(&file->prime);
1991572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
2001572042aSDavid Herrmann 		drm_syncobj_release(file);
2011572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_GEM))
2021572042aSDavid Herrmann 		drm_gem_release(dev, file);
203031ddd28STvrtko Ursulin 	put_pid(rcu_access_pointer(file->pid));
2041572042aSDavid Herrmann 	kfree(file);
2051572042aSDavid Herrmann 
2061572042aSDavid Herrmann 	return ERR_PTR(ret);
2071572042aSDavid Herrmann }
2081572042aSDavid Herrmann 
2091572042aSDavid Herrmann static void drm_events_release(struct drm_file *file_priv)
2101572042aSDavid Herrmann {
2111572042aSDavid Herrmann 	struct drm_device *dev = file_priv->minor->dev;
2121572042aSDavid Herrmann 	struct drm_pending_event *e, *et;
2131572042aSDavid Herrmann 	unsigned long flags;
2141572042aSDavid Herrmann 
2151572042aSDavid Herrmann 	spin_lock_irqsave(&dev->event_lock, flags);
2161572042aSDavid Herrmann 
2171572042aSDavid Herrmann 	/* Unlink pending events */
2181572042aSDavid Herrmann 	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
2191572042aSDavid Herrmann 				 pending_link) {
2201572042aSDavid Herrmann 		list_del(&e->pending_link);
2211572042aSDavid Herrmann 		e->file_priv = NULL;
2221572042aSDavid Herrmann 	}
2231572042aSDavid Herrmann 
2241572042aSDavid Herrmann 	/* Remove unconsumed events */
2251572042aSDavid Herrmann 	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
2261572042aSDavid Herrmann 		list_del(&e->link);
2271572042aSDavid Herrmann 		kfree(e);
2281572042aSDavid Herrmann 	}
2291572042aSDavid Herrmann 
2301572042aSDavid Herrmann 	spin_unlock_irqrestore(&dev->event_lock, flags);
2311572042aSDavid Herrmann }
2321572042aSDavid Herrmann 
2331572042aSDavid Herrmann /**
2341572042aSDavid Herrmann  * drm_file_free - free file context
2351572042aSDavid Herrmann  * @file: context to free, or NULL
2361572042aSDavid Herrmann  *
2371572042aSDavid Herrmann  * This destroys and deallocates a DRM file context previously allocated via
2381572042aSDavid Herrmann  * drm_file_alloc(). The caller must make sure to unlink it from any contexts
2391572042aSDavid Herrmann  * before calling this.
2401572042aSDavid Herrmann  *
2411572042aSDavid Herrmann  * If NULL is passed, this is a no-op.
2421572042aSDavid Herrmann  */
2431572042aSDavid Herrmann void drm_file_free(struct drm_file *file)
2441572042aSDavid Herrmann {
2451572042aSDavid Herrmann 	struct drm_device *dev;
2461572042aSDavid Herrmann 
2471572042aSDavid Herrmann 	if (!file)
2481572042aSDavid Herrmann 		return;
2491572042aSDavid Herrmann 
2501572042aSDavid Herrmann 	dev = file->minor->dev;
2511572042aSDavid Herrmann 
252723dad97STvrtko Ursulin 	drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
2535a2ba992SEmil Velikov 		     current->comm, task_pid_nr(current),
2541572042aSDavid Herrmann 		     (long)old_encode_dev(file->minor->kdev->devt),
2557e13ad89SChris Wilson 		     atomic_read(&dev->open_count));
2561572042aSDavid Herrmann 
25757bb1ee6SDaniel Vetter #ifdef CONFIG_DRM_LEGACY
2581572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
2591572042aSDavid Herrmann 	    dev->driver->preclose)
2601572042aSDavid Herrmann 		dev->driver->preclose(dev, file);
26157bb1ee6SDaniel Vetter #endif
2621572042aSDavid Herrmann 
2631572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_LEGACY))
2641572042aSDavid Herrmann 		drm_legacy_lock_release(dev, file->filp);
2651572042aSDavid Herrmann 
2661572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
2671572042aSDavid Herrmann 		drm_legacy_reclaim_buffers(dev, file);
2681572042aSDavid Herrmann 
2691572042aSDavid Herrmann 	drm_events_release(file);
2701572042aSDavid Herrmann 
2711572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2721572042aSDavid Herrmann 		drm_fb_release(file);
2731572042aSDavid Herrmann 		drm_property_destroy_user_blobs(dev, file);
2741572042aSDavid Herrmann 	}
2751572042aSDavid Herrmann 
2761572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
2771572042aSDavid Herrmann 		drm_syncobj_release(file);
2781572042aSDavid Herrmann 
2791572042aSDavid Herrmann 	if (drm_core_check_feature(dev, DRIVER_GEM))
2801572042aSDavid Herrmann 		drm_gem_release(dev, file);
2811572042aSDavid Herrmann 
2821572042aSDavid Herrmann 	drm_legacy_ctxbitmap_flush(dev, file);
2831572042aSDavid Herrmann 
2841572042aSDavid Herrmann 	if (drm_is_primary_client(file))
2851572042aSDavid Herrmann 		drm_master_release(file);
2861572042aSDavid Herrmann 
2871572042aSDavid Herrmann 	if (dev->driver->postclose)
2881572042aSDavid Herrmann 		dev->driver->postclose(dev, file);
2891572042aSDavid Herrmann 
2901572042aSDavid Herrmann 	drm_prime_destroy_file_private(&file->prime);
2911572042aSDavid Herrmann 
2921572042aSDavid Herrmann 	WARN_ON(!list_empty(&file->event_list));
2931572042aSDavid Herrmann 
294031ddd28STvrtko Ursulin 	put_pid(rcu_access_pointer(file->pid));
2951572042aSDavid Herrmann 	kfree(file);
2961572042aSDavid Herrmann }
2971572042aSDavid Herrmann 
298e21710a8SEmil Velikov static void drm_close_helper(struct file *filp)
299e21710a8SEmil Velikov {
300e21710a8SEmil Velikov 	struct drm_file *file_priv = filp->private_data;
301e21710a8SEmil Velikov 	struct drm_device *dev = file_priv->minor->dev;
302e21710a8SEmil Velikov 
303e21710a8SEmil Velikov 	mutex_lock(&dev->filelist_mutex);
304e21710a8SEmil Velikov 	list_del(&file_priv->lhead);
305e21710a8SEmil Velikov 	mutex_unlock(&dev->filelist_mutex);
306e21710a8SEmil Velikov 
307e21710a8SEmil Velikov 	drm_file_free(file_priv);
308e21710a8SEmil Velikov }
309e21710a8SEmil Velikov 
3109acdac68SDaniel Vetter /*
3119acdac68SDaniel Vetter  * Check whether DRI will run on this CPU.
3129acdac68SDaniel Vetter  *
3139acdac68SDaniel Vetter  * \return non-zero if the DRI will run on this CPU, or zero otherwise.
3149acdac68SDaniel Vetter  */
3159acdac68SDaniel Vetter static int drm_cpu_valid(void)
3169acdac68SDaniel Vetter {
3179acdac68SDaniel Vetter #if defined(__sparc__) && !defined(__sparc_v9__)
3189acdac68SDaniel Vetter 	return 0;		/* No cmpxchg before v9 sparc. */
3199acdac68SDaniel Vetter #endif
3209acdac68SDaniel Vetter 	return 1;
3219acdac68SDaniel Vetter }
3229acdac68SDaniel Vetter 
3239acdac68SDaniel Vetter /*
32485dce7ffSEmil Velikov  * Called whenever a process opens a drm node
3259acdac68SDaniel Vetter  *
3269acdac68SDaniel Vetter  * \param filp file pointer.
3279acdac68SDaniel Vetter  * \param minor acquired minor-object.
3289acdac68SDaniel Vetter  * \return zero on success or a negative number on failure.
3299acdac68SDaniel Vetter  *
3309acdac68SDaniel Vetter  * Creates and initializes a drm_file structure for the file private data in \p
3319acdac68SDaniel Vetter  * filp and adds it into the doubly linked list in \p dev.
3329acdac68SDaniel Vetter  */
3332c204f3dSOded Gabbay int drm_open_helper(struct file *filp, struct drm_minor *minor)
3349acdac68SDaniel Vetter {
3359acdac68SDaniel Vetter 	struct drm_device *dev = minor->dev;
3369acdac68SDaniel Vetter 	struct drm_file *priv;
3377eeaeb90SNoralf Trønnes 	int ret;
3389acdac68SDaniel Vetter 
3399acdac68SDaniel Vetter 	if (filp->f_flags & O_EXCL)
3409acdac68SDaniel Vetter 		return -EBUSY;	/* No exclusive opens */
3419acdac68SDaniel Vetter 	if (!drm_cpu_valid())
3429acdac68SDaniel Vetter 		return -EINVAL;
34317ee1eb6SEmil Velikov 	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
34417ee1eb6SEmil Velikov 	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
3459acdac68SDaniel Vetter 		return -EINVAL;
3469acdac68SDaniel Vetter 
347723dad97STvrtko Ursulin 	drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
348723dad97STvrtko Ursulin 		     current->comm, task_pid_nr(current), minor->index);
3499acdac68SDaniel Vetter 
3501572042aSDavid Herrmann 	priv = drm_file_alloc(minor);
3511572042aSDavid Herrmann 	if (IS_ERR(priv))
3521572042aSDavid Herrmann 		return PTR_ERR(priv);
3539acdac68SDaniel Vetter 
3547eeaeb90SNoralf Trønnes 	if (drm_is_primary_client(priv)) {
3557eeaeb90SNoralf Trønnes 		ret = drm_master_open(priv);
3567eeaeb90SNoralf Trønnes 		if (ret) {
3577eeaeb90SNoralf Trønnes 			drm_file_free(priv);
3587eeaeb90SNoralf Trønnes 			return ret;
3597eeaeb90SNoralf Trønnes 		}
3607eeaeb90SNoralf Trønnes 	}
3617eeaeb90SNoralf Trønnes 
3629acdac68SDaniel Vetter 	filp->private_data = priv;
36376ef6b28SDave Airlie 	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
3649acdac68SDaniel Vetter 	priv->filp = filp;
3659acdac68SDaniel Vetter 
3669acdac68SDaniel Vetter 	mutex_lock(&dev->filelist_mutex);
3679acdac68SDaniel Vetter 	list_add(&priv->lhead, &dev->filelist);
3689acdac68SDaniel Vetter 	mutex_unlock(&dev->filelist_mutex);
3699acdac68SDaniel Vetter 
37014054f2aSThomas Zimmermann #ifdef CONFIG_DRM_LEGACY
3719acdac68SDaniel Vetter #ifdef __alpha__
3729acdac68SDaniel Vetter 	/*
3739acdac68SDaniel Vetter 	 * Default the hose
3749acdac68SDaniel Vetter 	 */
3759acdac68SDaniel Vetter 	if (!dev->hose) {
3769acdac68SDaniel Vetter 		struct pci_dev *pci_dev;
377948de842SSuraj Upadhyay 
3789acdac68SDaniel Vetter 		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
3799acdac68SDaniel Vetter 		if (pci_dev) {
3809acdac68SDaniel Vetter 			dev->hose = pci_dev->sysdata;
3819acdac68SDaniel Vetter 			pci_dev_put(pci_dev);
3829acdac68SDaniel Vetter 		}
3839acdac68SDaniel Vetter 		if (!dev->hose) {
3849acdac68SDaniel Vetter 			struct pci_bus *b = list_entry(pci_root_buses.next,
3859acdac68SDaniel Vetter 				struct pci_bus, node);
3869acdac68SDaniel Vetter 			if (b)
3879acdac68SDaniel Vetter 				dev->hose = b->sysdata;
3889acdac68SDaniel Vetter 		}
3899acdac68SDaniel Vetter 	}
3909acdac68SDaniel Vetter #endif
39114054f2aSThomas Zimmermann #endif
3929acdac68SDaniel Vetter 
3939acdac68SDaniel Vetter 	return 0;
3949acdac68SDaniel Vetter }
3959acdac68SDaniel Vetter 
396094aa54fSDaniel Vetter /**
397094aa54fSDaniel Vetter  * drm_open - open method for DRM file
398094aa54fSDaniel Vetter  * @inode: device inode
399094aa54fSDaniel Vetter  * @filp: file pointer.
400094aa54fSDaniel Vetter  *
401094aa54fSDaniel Vetter  * This function must be used by drivers as their &file_operations.open method.
402094aa54fSDaniel Vetter  * It looks up the correct DRM device and instantiates all the per-file
403094aa54fSDaniel Vetter  * resources for it. It also calls the &drm_driver.open driver callback.
404094aa54fSDaniel Vetter  *
405094aa54fSDaniel Vetter  * RETURNS:
406094aa54fSDaniel Vetter  *
4070ae865efSCai Huoqing  * 0 on success or negative errno value on failure.
408094aa54fSDaniel Vetter  */
409094aa54fSDaniel Vetter int drm_open(struct inode *inode, struct file *filp)
410094aa54fSDaniel Vetter {
411094aa54fSDaniel Vetter 	struct drm_device *dev;
412094aa54fSDaniel Vetter 	struct drm_minor *minor;
413094aa54fSDaniel Vetter 	int retcode;
414094aa54fSDaniel Vetter 	int need_setup = 0;
415094aa54fSDaniel Vetter 
416094aa54fSDaniel Vetter 	minor = drm_minor_acquire(iminor(inode));
417094aa54fSDaniel Vetter 	if (IS_ERR(minor))
418094aa54fSDaniel Vetter 		return PTR_ERR(minor);
419094aa54fSDaniel Vetter 
4204017ad7bSDaniel Vetter 	dev = minor->dev;
4214017ad7bSDaniel Vetter 	if (drm_dev_needs_global_mutex(dev))
422591a2abfSDaniel Vetter 		mutex_lock(&drm_global_mutex);
423591a2abfSDaniel Vetter 
4247e13ad89SChris Wilson 	if (!atomic_fetch_inc(&dev->open_count))
425094aa54fSDaniel Vetter 		need_setup = 1;
426094aa54fSDaniel Vetter 
427094aa54fSDaniel Vetter 	/* share address_space across all char-devs of a single device */
428094aa54fSDaniel Vetter 	filp->f_mapping = dev->anon_inode->i_mapping;
429094aa54fSDaniel Vetter 
430094aa54fSDaniel Vetter 	retcode = drm_open_helper(filp, minor);
431094aa54fSDaniel Vetter 	if (retcode)
432094aa54fSDaniel Vetter 		goto err_undo;
433094aa54fSDaniel Vetter 	if (need_setup) {
434094aa54fSDaniel Vetter 		retcode = drm_legacy_setup(dev);
435094aa54fSDaniel Vetter 		if (retcode) {
436094aa54fSDaniel Vetter 			drm_close_helper(filp);
437094aa54fSDaniel Vetter 			goto err_undo;
438094aa54fSDaniel Vetter 		}
439094aa54fSDaniel Vetter 	}
440591a2abfSDaniel Vetter 
4414017ad7bSDaniel Vetter 	if (drm_dev_needs_global_mutex(dev))
442591a2abfSDaniel Vetter 		mutex_unlock(&drm_global_mutex);
443591a2abfSDaniel Vetter 
444094aa54fSDaniel Vetter 	return 0;
445094aa54fSDaniel Vetter 
446094aa54fSDaniel Vetter err_undo:
4477e13ad89SChris Wilson 	atomic_dec(&dev->open_count);
4484017ad7bSDaniel Vetter 	if (drm_dev_needs_global_mutex(dev))
449591a2abfSDaniel Vetter 		mutex_unlock(&drm_global_mutex);
450094aa54fSDaniel Vetter 	drm_minor_release(minor);
451094aa54fSDaniel Vetter 	return retcode;
452094aa54fSDaniel Vetter }
453094aa54fSDaniel Vetter EXPORT_SYMBOL(drm_open);
454094aa54fSDaniel Vetter 
4559acdac68SDaniel Vetter void drm_lastclose(struct drm_device * dev)
4569acdac68SDaniel Vetter {
457723dad97STvrtko Ursulin 	drm_dbg_core(dev, "\n");
4589acdac68SDaniel Vetter 
4599acdac68SDaniel Vetter 	if (dev->driver->lastclose)
4609acdac68SDaniel Vetter 		dev->driver->lastclose(dev);
461723dad97STvrtko Ursulin 	drm_dbg_core(dev, "driver lastclose completed\n");
4629acdac68SDaniel Vetter 
4639acdac68SDaniel Vetter 	if (drm_core_check_feature(dev, DRIVER_LEGACY))
4649acdac68SDaniel Vetter 		drm_legacy_dev_reinit(dev);
465c76f0f7cSNoralf Trønnes 
466c76f0f7cSNoralf Trønnes 	drm_client_dev_restore(dev);
4679acdac68SDaniel Vetter }
4689acdac68SDaniel Vetter 
4699acdac68SDaniel Vetter /**
4709acdac68SDaniel Vetter  * drm_release - release method for DRM file
4719acdac68SDaniel Vetter  * @inode: device inode
4729acdac68SDaniel Vetter  * @filp: file pointer.
4739acdac68SDaniel Vetter  *
474b93658f8SDaniel Vetter  * This function must be used by drivers as their &file_operations.release
475b93658f8SDaniel Vetter  * method. It frees any resources associated with the open file, and calls the
47645c3d213SDaniel Vetter  * &drm_driver.postclose driver callback. If this is the last open file for the
47745c3d213SDaniel Vetter  * DRM device, it also proceeds to call the &drm_driver.lastclose driver callback.
4789acdac68SDaniel Vetter  *
4799acdac68SDaniel Vetter  * RETURNS:
4809acdac68SDaniel Vetter  *
4819acdac68SDaniel Vetter  * Always succeeds and returns 0.
4829acdac68SDaniel Vetter  */
4839acdac68SDaniel Vetter int drm_release(struct inode *inode, struct file *filp)
4849acdac68SDaniel Vetter {
4859acdac68SDaniel Vetter 	struct drm_file *file_priv = filp->private_data;
4869acdac68SDaniel Vetter 	struct drm_minor *minor = file_priv->minor;
4879acdac68SDaniel Vetter 	struct drm_device *dev = minor->dev;
4889acdac68SDaniel Vetter 
4894017ad7bSDaniel Vetter 	if (drm_dev_needs_global_mutex(dev))
4909acdac68SDaniel Vetter 		mutex_lock(&drm_global_mutex);
4919acdac68SDaniel Vetter 
492723dad97STvrtko Ursulin 	drm_dbg_core(dev, "open_count = %d\n", atomic_read(&dev->open_count));
4939acdac68SDaniel Vetter 
494e21710a8SEmil Velikov 	drm_close_helper(filp);
4959acdac68SDaniel Vetter 
4967e13ad89SChris Wilson 	if (atomic_dec_and_test(&dev->open_count))
4979acdac68SDaniel Vetter 		drm_lastclose(dev);
4981ee57d4dSNoralf Trønnes 
4994017ad7bSDaniel Vetter 	if (drm_dev_needs_global_mutex(dev))
5009acdac68SDaniel Vetter 		mutex_unlock(&drm_global_mutex);
5019acdac68SDaniel Vetter 
5029acdac68SDaniel Vetter 	drm_minor_release(minor);
5039acdac68SDaniel Vetter 
5049acdac68SDaniel Vetter 	return 0;
5059acdac68SDaniel Vetter }
5069acdac68SDaniel Vetter EXPORT_SYMBOL(drm_release);
5079acdac68SDaniel Vetter 
508031ddd28STvrtko Ursulin void drm_file_update_pid(struct drm_file *filp)
509031ddd28STvrtko Ursulin {
510031ddd28STvrtko Ursulin 	struct drm_device *dev;
511031ddd28STvrtko Ursulin 	struct pid *pid, *old;
512031ddd28STvrtko Ursulin 
513031ddd28STvrtko Ursulin 	/*
514031ddd28STvrtko Ursulin 	 * Master nodes need to keep the original ownership in order for
515031ddd28STvrtko Ursulin 	 * drm_master_check_perm to keep working correctly. (See comment in
516031ddd28STvrtko Ursulin 	 * drm_auth.c.)
517031ddd28STvrtko Ursulin 	 */
518031ddd28STvrtko Ursulin 	if (filp->was_master)
519031ddd28STvrtko Ursulin 		return;
520031ddd28STvrtko Ursulin 
521031ddd28STvrtko Ursulin 	pid = task_tgid(current);
522031ddd28STvrtko Ursulin 
523031ddd28STvrtko Ursulin 	/*
524031ddd28STvrtko Ursulin 	 * Quick unlocked check since the model is a single handover followed by
525031ddd28STvrtko Ursulin 	 * exclusive repeated use.
526031ddd28STvrtko Ursulin 	 */
527031ddd28STvrtko Ursulin 	if (pid == rcu_access_pointer(filp->pid))
528031ddd28STvrtko Ursulin 		return;
529031ddd28STvrtko Ursulin 
530031ddd28STvrtko Ursulin 	dev = filp->minor->dev;
531031ddd28STvrtko Ursulin 	mutex_lock(&dev->filelist_mutex);
532031ddd28STvrtko Ursulin 	old = rcu_replace_pointer(filp->pid, pid, 1);
533031ddd28STvrtko Ursulin 	mutex_unlock(&dev->filelist_mutex);
534031ddd28STvrtko Ursulin 
535031ddd28STvrtko Ursulin 	if (pid != old) {
536031ddd28STvrtko Ursulin 		get_pid(pid);
537031ddd28STvrtko Ursulin 		synchronize_rcu();
538031ddd28STvrtko Ursulin 		put_pid(old);
539031ddd28STvrtko Ursulin 	}
540031ddd28STvrtko Ursulin }
541031ddd28STvrtko Ursulin 
5429acdac68SDaniel Vetter /**
5437a2c65ddSChris Wilson  * drm_release_noglobal - release method for DRM file
5447a2c65ddSChris Wilson  * @inode: device inode
5457a2c65ddSChris Wilson  * @filp: file pointer.
5467a2c65ddSChris Wilson  *
5477a2c65ddSChris Wilson  * This function may be used by drivers as their &file_operations.release
5487a2c65ddSChris Wilson  * method. It frees any resources associated with the open file prior to taking
5497a2c65ddSChris Wilson  * the drm_global_mutex, which then calls the &drm_driver.postclose driver
5507a2c65ddSChris Wilson  * callback. If this is the last open file for the DRM device, it also proceeds to
5517a2c65ddSChris Wilson  * call the &drm_driver.lastclose driver callback.
5527a2c65ddSChris Wilson  *
5537a2c65ddSChris Wilson  * RETURNS:
5547a2c65ddSChris Wilson  *
5557a2c65ddSChris Wilson  * Always succeeds and returns 0.
5567a2c65ddSChris Wilson  */
5577a2c65ddSChris Wilson int drm_release_noglobal(struct inode *inode, struct file *filp)
5587a2c65ddSChris Wilson {
5597a2c65ddSChris Wilson 	struct drm_file *file_priv = filp->private_data;
5607a2c65ddSChris Wilson 	struct drm_minor *minor = file_priv->minor;
5617a2c65ddSChris Wilson 	struct drm_device *dev = minor->dev;
5627a2c65ddSChris Wilson 
5637a2c65ddSChris Wilson 	drm_close_helper(filp);
5647a2c65ddSChris Wilson 
5657e13ad89SChris Wilson 	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
5667a2c65ddSChris Wilson 		drm_lastclose(dev);
5677a2c65ddSChris Wilson 		mutex_unlock(&drm_global_mutex);
5687e13ad89SChris Wilson 	}
5697a2c65ddSChris Wilson 
5707a2c65ddSChris Wilson 	drm_minor_release(minor);
5717a2c65ddSChris Wilson 
5727a2c65ddSChris Wilson 	return 0;
5737a2c65ddSChris Wilson }
5747a2c65ddSChris Wilson EXPORT_SYMBOL(drm_release_noglobal);
5757a2c65ddSChris Wilson 
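/*
 * Modern (non-legacy) drivers that want to keep the drm_global_mutex out of
 * their release path can simply point their file operations at this variant,
 * for example:
 *
 *     .release = drm_release_noglobal,
 */
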
5767a2c65ddSChris Wilson /**
5779acdac68SDaniel Vetter  * drm_read - read method for DRM file
5789acdac68SDaniel Vetter  * @filp: file pointer
5799acdac68SDaniel Vetter  * @buffer: userspace destination pointer for the read
5809acdac68SDaniel Vetter  * @count: count in bytes to read
5819acdac68SDaniel Vetter  * @offset: offset to read
5829acdac68SDaniel Vetter  *
583b93658f8SDaniel Vetter  * This function must be used by drivers as their &file_operations.read
5840ae865efSCai Huoqing  * method if they use DRM events for asynchronous signalling to userspace.
5859acdac68SDaniel Vetter  * Since events are used by the KMS API for vblank and page flip completion this
5869acdac68SDaniel Vetter  * means all modern display drivers must use it.
5879acdac68SDaniel Vetter  *
588868941b1SJason A. Donenfeld  * @offset is ignored; DRM events are read like a pipe. Polling support is
5899acdac68SDaniel Vetter  * provided by drm_poll().
5909acdac68SDaniel Vetter  *
5919acdac68SDaniel Vetter  * This function will only ever read a full event. Therefore userspace must
5929acdac68SDaniel Vetter  * supply a big enough buffer to fit any event to ensure forward progress. Since
5939acdac68SDaniel Vetter  * the maximum event space is currently 4K it's recommended to just use that for
5949acdac68SDaniel Vetter  * safety.
5959acdac68SDaniel Vetter  *
5969acdac68SDaniel Vetter  * RETURNS:
5979acdac68SDaniel Vetter  *
5989acdac68SDaniel Vetter  * Number of bytes read (always aligned to full events, and can be 0) or a
5999acdac68SDaniel Vetter  * negative error code on failure.
6009acdac68SDaniel Vetter  */
6019acdac68SDaniel Vetter ssize_t drm_read(struct file *filp, char __user *buffer,
6029acdac68SDaniel Vetter 		 size_t count, loff_t *offset)
6039acdac68SDaniel Vetter {
6049acdac68SDaniel Vetter 	struct drm_file *file_priv = filp->private_data;
6059acdac68SDaniel Vetter 	struct drm_device *dev = file_priv->minor->dev;
6069acdac68SDaniel Vetter 	ssize_t ret;
6079acdac68SDaniel Vetter 
6089acdac68SDaniel Vetter 	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
6099acdac68SDaniel Vetter 	if (ret)
6109acdac68SDaniel Vetter 		return ret;
6119acdac68SDaniel Vetter 
6129acdac68SDaniel Vetter 	for (;;) {
6139acdac68SDaniel Vetter 		struct drm_pending_event *e = NULL;
6149acdac68SDaniel Vetter 
6159acdac68SDaniel Vetter 		spin_lock_irq(&dev->event_lock);
6169acdac68SDaniel Vetter 		if (!list_empty(&file_priv->event_list)) {
6179acdac68SDaniel Vetter 			e = list_first_entry(&file_priv->event_list,
6189acdac68SDaniel Vetter 					struct drm_pending_event, link);
6199acdac68SDaniel Vetter 			file_priv->event_space += e->event->length;
6209acdac68SDaniel Vetter 			list_del(&e->link);
6219acdac68SDaniel Vetter 		}
6229acdac68SDaniel Vetter 		spin_unlock_irq(&dev->event_lock);
6239acdac68SDaniel Vetter 
6249acdac68SDaniel Vetter 		if (e == NULL) {
6259acdac68SDaniel Vetter 			if (ret)
6269acdac68SDaniel Vetter 				break;
6279acdac68SDaniel Vetter 
6289acdac68SDaniel Vetter 			if (filp->f_flags & O_NONBLOCK) {
6299acdac68SDaniel Vetter 				ret = -EAGAIN;
6309acdac68SDaniel Vetter 				break;
6319acdac68SDaniel Vetter 			}
6329acdac68SDaniel Vetter 
6339acdac68SDaniel Vetter 			mutex_unlock(&file_priv->event_read_lock);
6349acdac68SDaniel Vetter 			ret = wait_event_interruptible(file_priv->event_wait,
6359acdac68SDaniel Vetter 						       !list_empty(&file_priv->event_list));
6369acdac68SDaniel Vetter 			if (ret >= 0)
6379acdac68SDaniel Vetter 				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
6389acdac68SDaniel Vetter 			if (ret)
6399acdac68SDaniel Vetter 				return ret;
6409acdac68SDaniel Vetter 		} else {
6419acdac68SDaniel Vetter 			unsigned length = e->event->length;
6429acdac68SDaniel Vetter 
6439acdac68SDaniel Vetter 			if (length > count - ret) {
6449acdac68SDaniel Vetter put_back_event:
6459acdac68SDaniel Vetter 				spin_lock_irq(&dev->event_lock);
6469acdac68SDaniel Vetter 				file_priv->event_space -= length;
6479acdac68SDaniel Vetter 				list_add(&e->link, &file_priv->event_list);
6489acdac68SDaniel Vetter 				spin_unlock_irq(&dev->event_lock);
64987189b78SKenny Levinsen 				wake_up_interruptible_poll(&file_priv->event_wait,
65087189b78SKenny Levinsen 					EPOLLIN | EPOLLRDNORM);
6519acdac68SDaniel Vetter 				break;
6529acdac68SDaniel Vetter 			}
6539acdac68SDaniel Vetter 
6549acdac68SDaniel Vetter 			if (copy_to_user(buffer + ret, e->event, length)) {
6559acdac68SDaniel Vetter 				if (ret == 0)
6569acdac68SDaniel Vetter 					ret = -EFAULT;
6579acdac68SDaniel Vetter 				goto put_back_event;
6589acdac68SDaniel Vetter 			}
6599acdac68SDaniel Vetter 
6609acdac68SDaniel Vetter 			ret += length;
6619acdac68SDaniel Vetter 			kfree(e);
6629acdac68SDaniel Vetter 		}
6639acdac68SDaniel Vetter 	}
6649acdac68SDaniel Vetter 	mutex_unlock(&file_priv->event_read_lock);
6659acdac68SDaniel Vetter 
6669acdac68SDaniel Vetter 	return ret;
6679acdac68SDaniel Vetter }
6689acdac68SDaniel Vetter EXPORT_SYMBOL(drm_read);
6699acdac68SDaniel Vetter 
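/*
 * The corresponding userspace side, as a rough sketch (drm_fd is assumed to be
 * an already-open DRM file descriptor): read() into a 4k buffer and walk the
 * packed events by their self-describing length:
 *
 *     char buf[4096];
 *     ssize_t len = read(drm_fd, buf, sizeof(buf));
 *
 *     for (ssize_t off = 0; len > 0 && off < len; ) {
 *             struct drm_event *e = (struct drm_event *)(buf + off);
 *
 *             // dispatch on e->type, e.g. DRM_EVENT_VBLANK or
 *             // DRM_EVENT_FLIP_COMPLETE
 *             off += e->length;
 *     }
 */
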
6709acdac68SDaniel Vetter /**
6719acdac68SDaniel Vetter  * drm_poll - poll method for DRM file
6729acdac68SDaniel Vetter  * @filp: file pointer
6739acdac68SDaniel Vetter  * @wait: poll waiter table
6749acdac68SDaniel Vetter  *
675b93658f8SDaniel Vetter  * This function must be used by drivers as their &file_operations.poll method
6760ae865efSCai Huoqing  * if they use DRM events for asynchronous signalling to userspace.  Since
677b93658f8SDaniel Vetter  * events are used by the KMS API for vblank and page flip completion this means
678b93658f8SDaniel Vetter  * all modern display drivers must use it.
6799acdac68SDaniel Vetter  *
6809acdac68SDaniel Vetter  * See also drm_read().
6819acdac68SDaniel Vetter  *
6829acdac68SDaniel Vetter  * RETURNS:
6839acdac68SDaniel Vetter  *
6849acdac68SDaniel Vetter  * Mask of POLL flags indicating the current status of the file.
6859acdac68SDaniel Vetter  */
686afc9a42bSAl Viro __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
6879acdac68SDaniel Vetter {
6889acdac68SDaniel Vetter 	struct drm_file *file_priv = filp->private_data;
689afc9a42bSAl Viro 	__poll_t mask = 0;
6909acdac68SDaniel Vetter 
6919acdac68SDaniel Vetter 	poll_wait(filp, &file_priv->event_wait, wait);
6929acdac68SDaniel Vetter 
6939acdac68SDaniel Vetter 	if (!list_empty(&file_priv->event_list))
694a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
6959acdac68SDaniel Vetter 
6969acdac68SDaniel Vetter 	return mask;
6979acdac68SDaniel Vetter }
6989acdac68SDaniel Vetter EXPORT_SYMBOL(drm_poll);
6999acdac68SDaniel Vetter 
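/*
 * Userspace normally combines this with poll()/select()/epoll on the DRM file
 * descriptor before reading, roughly:
 *
 *     struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             // events are pending; drain them with read() as shown above
 *     }
 */
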
7009acdac68SDaniel Vetter /**
7019acdac68SDaniel Vetter  * drm_event_reserve_init_locked - init a DRM event and reserve space for it
7029acdac68SDaniel Vetter  * @dev: DRM device
7039acdac68SDaniel Vetter  * @file_priv: DRM file private data
7049acdac68SDaniel Vetter  * @p: tracking structure for the pending event
7059acdac68SDaniel Vetter  * @e: actual event data to deliver to userspace
7069acdac68SDaniel Vetter  *
7079acdac68SDaniel Vetter  * This function prepares the passed in event for eventual delivery. If the event
7089acdac68SDaniel Vetter  * doesn't get delivered (because the IOCTL fails later on, before queuing up
7099acdac68SDaniel Vetter  * anything) then the event must be cancelled and freed using
7109acdac68SDaniel Vetter  * drm_event_cancel_free(). Successfully initialized events should be sent out
7119acdac68SDaniel Vetter  * using drm_send_event() or drm_send_event_locked() to signal completion of the
7129acdac68SDaniel Vetter  * asynchronous event to userspace.
7139acdac68SDaniel Vetter  *
7149acdac68SDaniel Vetter  * If callers embedded @p into a larger structure it must be allocated with
7159acdac68SDaniel Vetter  * kmalloc and @p must be the first member element.
7169acdac68SDaniel Vetter  *
7179acdac68SDaniel Vetter  * This is the locked version of drm_event_reserve_init() for callers which
7189acdac68SDaniel Vetter  * already hold &drm_device.event_lock.
7199acdac68SDaniel Vetter  *
7209acdac68SDaniel Vetter  * RETURNS:
7219acdac68SDaniel Vetter  *
7229acdac68SDaniel Vetter  * 0 on success or a negative error code on failure.
7239acdac68SDaniel Vetter  */
7249acdac68SDaniel Vetter int drm_event_reserve_init_locked(struct drm_device *dev,
7259acdac68SDaniel Vetter 				  struct drm_file *file_priv,
7269acdac68SDaniel Vetter 				  struct drm_pending_event *p,
7279acdac68SDaniel Vetter 				  struct drm_event *e)
7289acdac68SDaniel Vetter {
7299acdac68SDaniel Vetter 	if (file_priv->event_space < e->length)
7309acdac68SDaniel Vetter 		return -ENOMEM;
7319acdac68SDaniel Vetter 
7329acdac68SDaniel Vetter 	file_priv->event_space -= e->length;
7339acdac68SDaniel Vetter 
7349acdac68SDaniel Vetter 	p->event = e;
7359acdac68SDaniel Vetter 	list_add(&p->pending_link, &file_priv->pending_event_list);
7369acdac68SDaniel Vetter 	p->file_priv = file_priv;
7379acdac68SDaniel Vetter 
7389acdac68SDaniel Vetter 	return 0;
7399acdac68SDaniel Vetter }
7409acdac68SDaniel Vetter EXPORT_SYMBOL(drm_event_reserve_init_locked);
7419acdac68SDaniel Vetter 
7429acdac68SDaniel Vetter /**
7439acdac68SDaniel Vetter  * drm_event_reserve_init - init a DRM event and reserve space for it
7449acdac68SDaniel Vetter  * @dev: DRM device
7459acdac68SDaniel Vetter  * @file_priv: DRM file private data
7469acdac68SDaniel Vetter  * @p: tracking structure for the pending event
7479acdac68SDaniel Vetter  * @e: actual event data to deliver to userspace
7489acdac68SDaniel Vetter  *
7499acdac68SDaniel Vetter  * This function prepares the passed in event for eventual delivery. If the event
7509acdac68SDaniel Vetter  * doesn't get delivered (because the IOCTL fails later on, before queuing up
7519acdac68SDaniel Vetter  * anything) then the event must be cancelled and freed using
7529acdac68SDaniel Vetter  * drm_event_cancel_free(). Successfully initialized events should be sent out
7539acdac68SDaniel Vetter  * using drm_send_event() or drm_send_event_locked() to signal completion of the
7549acdac68SDaniel Vetter  * asynchronous event to userspace.
7559acdac68SDaniel Vetter  *
7569acdac68SDaniel Vetter  * If callers embedded @p into a larger structure it must be allocated with
7579acdac68SDaniel Vetter  * kmalloc and @p must be the first member element.
7589acdac68SDaniel Vetter  *
7599acdac68SDaniel Vetter  * Callers which already hold &drm_device.event_lock should use
7609acdac68SDaniel Vetter  * drm_event_reserve_init_locked() instead.
7619acdac68SDaniel Vetter  *
7629acdac68SDaniel Vetter  * RETURNS:
7639acdac68SDaniel Vetter  *
7649acdac68SDaniel Vetter  * 0 on success or a negative error code on failure.
7659acdac68SDaniel Vetter  */
7669acdac68SDaniel Vetter int drm_event_reserve_init(struct drm_device *dev,
7679acdac68SDaniel Vetter 			   struct drm_file *file_priv,
7689acdac68SDaniel Vetter 			   struct drm_pending_event *p,
7699acdac68SDaniel Vetter 			   struct drm_event *e)
7709acdac68SDaniel Vetter {
7719acdac68SDaniel Vetter 	unsigned long flags;
7729acdac68SDaniel Vetter 	int ret;
7739acdac68SDaniel Vetter 
7749acdac68SDaniel Vetter 	spin_lock_irqsave(&dev->event_lock, flags);
7759acdac68SDaniel Vetter 	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
7769acdac68SDaniel Vetter 	spin_unlock_irqrestore(&dev->event_lock, flags);
7779acdac68SDaniel Vetter 
7789acdac68SDaniel Vetter 	return ret;
7799acdac68SDaniel Vetter }
7809acdac68SDaniel Vetter EXPORT_SYMBOL(drm_event_reserve_init);
7819acdac68SDaniel Vetter 
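/*
 * Putting the pieces together, the usual driver-side pattern looks roughly
 * like this (the example_event structure is illustrative; it mirrors e.g.
 * &struct drm_pending_vblank_event):
 *
 *     struct example_event {
 *             struct drm_pending_event base;  // must be the first member
 *             struct drm_event_vblank event;
 *     };
 *
 *     e = kzalloc(sizeof(*e), GFP_KERNEL);
 *     if (!e)
 *             return -ENOMEM;
 *
 *     e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 *     e->event.base.length = sizeof(e->event);
 *
 *     ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
 *     if (ret) {
 *             kfree(e);
 *             return ret;
 *     }
 *
 *     // on successful completion of the asynchronous work:
 *     drm_send_event(dev, &e->base);
 *     // or, if the operation is aborted before being queued:
 *     drm_event_cancel_free(dev, &e->base);
 */
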
7829acdac68SDaniel Vetter /**
7831e55a53aSMatt Roper  * drm_event_cancel_free - free a DRM event and release its space
7849acdac68SDaniel Vetter  * @dev: DRM device
7859acdac68SDaniel Vetter  * @p: tracking structure for the pending event
7869acdac68SDaniel Vetter  *
7879acdac68SDaniel Vetter  * This function frees the event @p initialized with drm_event_reserve_init()
788b93658f8SDaniel Vetter  * and releases any allocated space. It is used to cancel an event when the
789b93658f8SDaniel Vetter  * nonblocking operation could not be submitted and needed to be aborted.
7909acdac68SDaniel Vetter  */
7919acdac68SDaniel Vetter void drm_event_cancel_free(struct drm_device *dev,
7929acdac68SDaniel Vetter 			   struct drm_pending_event *p)
7939acdac68SDaniel Vetter {
7949acdac68SDaniel Vetter 	unsigned long flags;
795948de842SSuraj Upadhyay 
7969acdac68SDaniel Vetter 	spin_lock_irqsave(&dev->event_lock, flags);
7979acdac68SDaniel Vetter 	if (p->file_priv) {
7989acdac68SDaniel Vetter 		p->file_priv->event_space += p->event->length;
7999acdac68SDaniel Vetter 		list_del(&p->pending_link);
8009acdac68SDaniel Vetter 	}
8019acdac68SDaniel Vetter 	spin_unlock_irqrestore(&dev->event_lock, flags);
8029acdac68SDaniel Vetter 
8039acdac68SDaniel Vetter 	if (p->fence)
8049acdac68SDaniel Vetter 		dma_fence_put(p->fence);
8059acdac68SDaniel Vetter 
8069acdac68SDaniel Vetter 	kfree(p);
8079acdac68SDaniel Vetter }
8089acdac68SDaniel Vetter EXPORT_SYMBOL(drm_event_cancel_free);
8099acdac68SDaniel Vetter 
81064bf1491SFabio M. De Francesco static void drm_send_event_helper(struct drm_device *dev,
811a78e7a51SVeera Sundaram Sankaran 			   struct drm_pending_event *e, ktime_t timestamp)
812a78e7a51SVeera Sundaram Sankaran {
813a78e7a51SVeera Sundaram Sankaran 	assert_spin_locked(&dev->event_lock);
814a78e7a51SVeera Sundaram Sankaran 
815a78e7a51SVeera Sundaram Sankaran 	if (e->completion) {
816a78e7a51SVeera Sundaram Sankaran 		complete_all(e->completion);
817a78e7a51SVeera Sundaram Sankaran 		e->completion_release(e->completion);
818a78e7a51SVeera Sundaram Sankaran 		e->completion = NULL;
819a78e7a51SVeera Sundaram Sankaran 	}
820a78e7a51SVeera Sundaram Sankaran 
821a78e7a51SVeera Sundaram Sankaran 	if (e->fence) {
822a78e7a51SVeera Sundaram Sankaran 		if (timestamp)
823a78e7a51SVeera Sundaram Sankaran 			dma_fence_signal_timestamp(e->fence, timestamp);
824a78e7a51SVeera Sundaram Sankaran 		else
825a78e7a51SVeera Sundaram Sankaran 			dma_fence_signal(e->fence);
826a78e7a51SVeera Sundaram Sankaran 		dma_fence_put(e->fence);
827a78e7a51SVeera Sundaram Sankaran 	}
828a78e7a51SVeera Sundaram Sankaran 
829a78e7a51SVeera Sundaram Sankaran 	if (!e->file_priv) {
830a78e7a51SVeera Sundaram Sankaran 		kfree(e);
831a78e7a51SVeera Sundaram Sankaran 		return;
832a78e7a51SVeera Sundaram Sankaran 	}
833a78e7a51SVeera Sundaram Sankaran 
834a78e7a51SVeera Sundaram Sankaran 	list_del(&e->pending_link);
835a78e7a51SVeera Sundaram Sankaran 	list_add_tail(&e->link,
836a78e7a51SVeera Sundaram Sankaran 		      &e->file_priv->event_list);
837a78e7a51SVeera Sundaram Sankaran 	wake_up_interruptible_poll(&e->file_priv->event_wait,
838a78e7a51SVeera Sundaram Sankaran 		EPOLLIN | EPOLLRDNORM);
839a78e7a51SVeera Sundaram Sankaran }
840a78e7a51SVeera Sundaram Sankaran 
841a78e7a51SVeera Sundaram Sankaran /**
842a78e7a51SVeera Sundaram Sankaran  * drm_send_event_timestamp_locked - send DRM event to file descriptor
843a78e7a51SVeera Sundaram Sankaran  * @dev: DRM device
844a78e7a51SVeera Sundaram Sankaran  * @e: DRM event to deliver
845a78e7a51SVeera Sundaram Sankaran  * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
846a78e7a51SVeera Sundaram Sankaran  * time domain
847a78e7a51SVeera Sundaram Sankaran  *
848a78e7a51SVeera Sundaram Sankaran  * This function sends the event @e, initialized with drm_event_reserve_init(),
849a78e7a51SVeera Sundaram Sankaran  * to its associated userspace DRM file. Callers must already hold
850a78e7a51SVeera Sundaram Sankaran  * &drm_device.event_lock.
851a78e7a51SVeera Sundaram Sankaran  *
852a78e7a51SVeera Sundaram Sankaran  * Note that the core will take care of unlinking and disarming events when the
853a78e7a51SVeera Sundaram Sankaran  * corresponding DRM file is closed. Drivers need not worry about whether the
854a78e7a51SVeera Sundaram Sankaran  * DRM file for this event still exists and can call this function upon
855a78e7a51SVeera Sundaram Sankaran  * completion of the asynchronous work unconditionally.
856a78e7a51SVeera Sundaram Sankaran  */
857a78e7a51SVeera Sundaram Sankaran void drm_send_event_timestamp_locked(struct drm_device *dev,
858a78e7a51SVeera Sundaram Sankaran 				     struct drm_pending_event *e, ktime_t timestamp)
859a78e7a51SVeera Sundaram Sankaran {
860a78e7a51SVeera Sundaram Sankaran 	drm_send_event_helper(dev, e, timestamp);
861a78e7a51SVeera Sundaram Sankaran }
862a78e7a51SVeera Sundaram Sankaran EXPORT_SYMBOL(drm_send_event_timestamp_locked);
863a78e7a51SVeera Sundaram Sankaran 
864a78e7a51SVeera Sundaram Sankaran /**
8659acdac68SDaniel Vetter  * drm_send_event_locked - send DRM event to file descriptor
8669acdac68SDaniel Vetter  * @dev: DRM device
8679acdac68SDaniel Vetter  * @e: DRM event to deliver
8689acdac68SDaniel Vetter  *
8699acdac68SDaniel Vetter  * This function sends the event @e, initialized with drm_event_reserve_init(),
8709acdac68SDaniel Vetter  * to its associated userspace DRM file. Callers must already hold
8719acdac68SDaniel Vetter  * &drm_device.event_lock, see drm_send_event() for the unlocked version.
8729acdac68SDaniel Vetter  *
8739acdac68SDaniel Vetter  * Note that the core will take care of unlinking and disarming events when the
8749acdac68SDaniel Vetter  * corresponding DRM file is closed. Drivers need not worry about whether the
8759acdac68SDaniel Vetter  * DRM file for this event still exists and can call this function upon
8769acdac68SDaniel Vetter  * completion of the asynchronous work unconditionally.
8779acdac68SDaniel Vetter  */
8789acdac68SDaniel Vetter void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
8799acdac68SDaniel Vetter {
880a78e7a51SVeera Sundaram Sankaran 	drm_send_event_helper(dev, e, 0);
8819acdac68SDaniel Vetter }
8829acdac68SDaniel Vetter EXPORT_SYMBOL(drm_send_event_locked);
8839acdac68SDaniel Vetter 
8849acdac68SDaniel Vetter /**
8859acdac68SDaniel Vetter  * drm_send_event - send DRM event to file descriptor
8869acdac68SDaniel Vetter  * @dev: DRM device
8879acdac68SDaniel Vetter  * @e: DRM event to deliver
8889acdac68SDaniel Vetter  *
8899acdac68SDaniel Vetter  * This function sends the event @e, initialized with drm_event_reserve_init(),
8909acdac68SDaniel Vetter  * to its associated userspace DRM file. This function acquires
8919acdac68SDaniel Vetter  * &drm_device.event_lock, see drm_send_event_locked() for callers which already
8929acdac68SDaniel Vetter  * hold this lock.
8939acdac68SDaniel Vetter  *
8949acdac68SDaniel Vetter  * Note that the core will take care of unlinking and disarming events when the
8959acdac68SDaniel Vetter  * corresponding DRM file is closed. Drivers need not worry about whether the
8969acdac68SDaniel Vetter  * DRM file for this event still exists and can call this function upon
8979acdac68SDaniel Vetter  * completion of the asynchronous work unconditionally.
8989acdac68SDaniel Vetter  */
8999acdac68SDaniel Vetter void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
9009acdac68SDaniel Vetter {
9019acdac68SDaniel Vetter 	unsigned long irqflags;
9029acdac68SDaniel Vetter 
9039acdac68SDaniel Vetter 	spin_lock_irqsave(&dev->event_lock, irqflags);
904a78e7a51SVeera Sundaram Sankaran 	drm_send_event_helper(dev, e, 0);
9059acdac68SDaniel Vetter 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
9069acdac68SDaniel Vetter }
9079acdac68SDaniel Vetter EXPORT_SYMBOL(drm_send_event);
9084748aa16SChris Wilson 
909686b21b5SRob Clark static void print_size(struct drm_printer *p, const char *stat,
910686b21b5SRob Clark 		       const char *region, u64 sz)
911686b21b5SRob Clark {
912686b21b5SRob Clark 	const char *units[] = {"", " KiB", " MiB"};
913686b21b5SRob Clark 	unsigned u;
914686b21b5SRob Clark 
915686b21b5SRob Clark 	for (u = 0; u < ARRAY_SIZE(units) - 1; u++) {
916686b21b5SRob Clark 		if (sz < SZ_1K)
917686b21b5SRob Clark 			break;
918686b21b5SRob Clark 		sz = div_u64(sz, SZ_1K);
919686b21b5SRob Clark 	}
920686b21b5SRob Clark 
921686b21b5SRob Clark 	drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
922686b21b5SRob Clark }
923686b21b5SRob Clark 
924686b21b5SRob Clark /**
925686b21b5SRob Clark  * drm_print_memory_stats - A helper to print memory stats
926686b21b5SRob Clark  * @p: The printer to print output to
927686b21b5SRob Clark  * @stats: The collected memory stats
928686b21b5SRob Clark  * @supported_status: Bitmask of optional stats which are available
929686b21b5SRob Clark  * @region: The memory region
930686b21b5SRob Clark  *
931686b21b5SRob Clark  */
932686b21b5SRob Clark void drm_print_memory_stats(struct drm_printer *p,
933686b21b5SRob Clark 			    const struct drm_memory_stats *stats,
934686b21b5SRob Clark 			    enum drm_gem_object_status supported_status,
935686b21b5SRob Clark 			    const char *region)
936686b21b5SRob Clark {
937686b21b5SRob Clark 	print_size(p, "total", region, stats->private + stats->shared);
938686b21b5SRob Clark 	print_size(p, "shared", region, stats->shared);
939686b21b5SRob Clark 	print_size(p, "active", region, stats->active);
940686b21b5SRob Clark 
941686b21b5SRob Clark 	if (supported_status & DRM_GEM_OBJECT_RESIDENT)
942686b21b5SRob Clark 		print_size(p, "resident", region, stats->resident);
943686b21b5SRob Clark 
944686b21b5SRob Clark 	if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
945686b21b5SRob Clark 		print_size(p, "purgeable", region, stats->purgeable);
946686b21b5SRob Clark }
947686b21b5SRob Clark EXPORT_SYMBOL(drm_print_memory_stats);
948686b21b5SRob Clark 
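/*
 * For region "memory" this produces fdinfo lines in the format documented in
 * Documentation/gpu/drm-usage-stats.rst, for example (values are illustrative):
 *
 *     drm-total-memory:	8192 KiB
 *     drm-shared-memory:	2048 KiB
 *     drm-active-memory:	1024 KiB
 *     drm-resident-memory:	8192 KiB
 *     drm-purgeable-memory:	0
 */
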
949686b21b5SRob Clark /**
950686b21b5SRob Clark  * drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats
951686b21b5SRob Clark  * @p: the printer to print output to
952686b21b5SRob Clark  * @file: the DRM file
953686b21b5SRob Clark  *
954686b21b5SRob Clark  * Helper to iterate over GEM objects with a handle allocated in the specified
955686b21b5SRob Clark  * file.
956686b21b5SRob Clark  */
957686b21b5SRob Clark void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
958686b21b5SRob Clark {
959686b21b5SRob Clark 	struct drm_gem_object *obj;
960686b21b5SRob Clark 	struct drm_memory_stats status = {};
961*2cde325eSTomi Valkeinen 	enum drm_gem_object_status supported_status = 0;
962686b21b5SRob Clark 	int id;
963686b21b5SRob Clark 
964686b21b5SRob Clark 	spin_lock(&file->table_lock);
965686b21b5SRob Clark 	idr_for_each_entry (&file->object_idr, obj, id) {
966686b21b5SRob Clark 		enum drm_gem_object_status s = 0;
967686b21b5SRob Clark 
968686b21b5SRob Clark 		if (obj->funcs && obj->funcs->status) {
969686b21b5SRob Clark 			s = obj->funcs->status(obj);
970686b21b5SRob Clark 			supported_status = DRM_GEM_OBJECT_RESIDENT |
971686b21b5SRob Clark 					DRM_GEM_OBJECT_PURGEABLE;
972686b21b5SRob Clark 		}
973686b21b5SRob Clark 
974686b21b5SRob Clark 		if (obj->handle_count > 1) {
975686b21b5SRob Clark 			status.shared += obj->size;
976686b21b5SRob Clark 		} else {
977686b21b5SRob Clark 			status.private += obj->size;
978686b21b5SRob Clark 		}
979686b21b5SRob Clark 
980686b21b5SRob Clark 		if (s & DRM_GEM_OBJECT_RESIDENT) {
981686b21b5SRob Clark 			status.resident += obj->size;
982686b21b5SRob Clark 		} else {
983686b21b5SRob Clark 			/* If already purged or not yet backed by pages, don't
984686b21b5SRob Clark 			 * count it as purgeable:
985686b21b5SRob Clark 			 */
986686b21b5SRob Clark 			s &= ~DRM_GEM_OBJECT_PURGEABLE;
987686b21b5SRob Clark 		}
988686b21b5SRob Clark 
989686b21b5SRob Clark 		if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
990686b21b5SRob Clark 			status.active += obj->size;
991686b21b5SRob Clark 
992686b21b5SRob Clark 			/* If still active, don't count as purgeable: */
993686b21b5SRob Clark 			s &= ~DRM_GEM_OBJECT_PURGEABLE;
994686b21b5SRob Clark 		}
995686b21b5SRob Clark 
996686b21b5SRob Clark 		if (s & DRM_GEM_OBJECT_PURGEABLE)
997686b21b5SRob Clark 			status.purgeable += obj->size;
998686b21b5SRob Clark 	}
999686b21b5SRob Clark 	spin_unlock(&file->table_lock);
1000686b21b5SRob Clark 
1001686b21b5SRob Clark 	drm_print_memory_stats(p, &status, supported_status, "memory");
1002686b21b5SRob Clark }
1003686b21b5SRob Clark EXPORT_SYMBOL(drm_show_memory_stats);
1004686b21b5SRob Clark 
10054748aa16SChris Wilson /**
10063f09a0cdSRob Clark  * drm_show_fdinfo - helper for drm file fops
1007686b21b5SRob Clark  * @m: output stream
10083f09a0cdSRob Clark  * @f: the device file instance
10093f09a0cdSRob Clark  *
10103f09a0cdSRob Clark  * Helper to implement fdinfo, for userspace to query usage stats, etc, of a
10113f09a0cdSRob Clark  * process using the GPU.  See also &drm_driver.show_fdinfo.
10113f09a0cdSRob Clark  * Helper to implement fdinfo, for userspace to query usage stats, etc., of a
10133f09a0cdSRob Clark  * For text output format description please see Documentation/gpu/drm-usage-stats.rst
10143f09a0cdSRob Clark  */
10153f09a0cdSRob Clark void drm_show_fdinfo(struct seq_file *m, struct file *f)
10163f09a0cdSRob Clark {
10173f09a0cdSRob Clark 	struct drm_file *file = f->private_data;
10183f09a0cdSRob Clark 	struct drm_device *dev = file->minor->dev;
10193f09a0cdSRob Clark 	struct drm_printer p = drm_seq_file_printer(m);
10203f09a0cdSRob Clark 
10213f09a0cdSRob Clark 	drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
10223f09a0cdSRob Clark 	drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
10233f09a0cdSRob Clark 
10243f09a0cdSRob Clark 	if (dev_is_pci(dev->dev)) {
10253f09a0cdSRob Clark 		struct pci_dev *pdev = to_pci_dev(dev->dev);
10263f09a0cdSRob Clark 
10273f09a0cdSRob Clark 		drm_printf(&p, "drm-pdev:\t%04x:%02x:%02x.%d\n",
10283f09a0cdSRob Clark 			   pci_domain_nr(pdev->bus), pdev->bus->number,
10293f09a0cdSRob Clark 			   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
10303f09a0cdSRob Clark 	}
10313f09a0cdSRob Clark 
10323f09a0cdSRob Clark 	if (dev->driver->show_fdinfo)
10333f09a0cdSRob Clark 		dev->driver->show_fdinfo(&p, file);
10343f09a0cdSRob Clark }
10353f09a0cdSRob Clark EXPORT_SYMBOL(drm_show_fdinfo);
10363f09a0cdSRob Clark 
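/*
 * A driver opts in by pointing &file_operations.show_fdinfo at this helper and
 * optionally providing its own &drm_driver.show_fdinfo callback for
 * driver-specific lines, roughly (hypothetical "example_" names):
 *
 *     static void example_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 *     {
 *             drm_show_memory_stats(p, file);
 *     }
 *
 *     static const struct file_operations example_fops = {
 *             ...
 *             .show_fdinfo = drm_show_fdinfo,
 *     };
 *
 *     static const struct drm_driver example_driver = {
 *             ...
 *             .show_fdinfo = example_show_fdinfo,
 *     };
 */
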
10373f09a0cdSRob Clark /**
10384748aa16SChris Wilson  * mock_drm_getfile - Create a new struct file for the drm device
10394748aa16SChris Wilson  * @minor: drm minor to wrap (e.g. #drm_device.primary)
10404748aa16SChris Wilson  * @flags: file creation mode (O_RDWR etc)
10414748aa16SChris Wilson  *
10424748aa16SChris Wilson  * This create a new struct file that wraps a DRM file context around a
10434748aa16SChris Wilson  * This creates a new struct file that wraps a DRM file context around a
10444748aa16SChris Wilson  * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
10454748aa16SChris Wilson  * invoking userspace. The struct file may be operated on using its f_op
10464748aa16SChris Wilson  * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
10474748aa16SChris Wilson  *
10484748aa16SChris Wilson  * RETURNS:
10494748aa16SChris Wilson  * Pointer to newly created struct file, ERR_PTR on failure.
10504748aa16SChris Wilson  */
10514748aa16SChris Wilson struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
10524748aa16SChris Wilson {
10534748aa16SChris Wilson 	struct drm_device *dev = minor->dev;
10544748aa16SChris Wilson 	struct drm_file *priv;
10554748aa16SChris Wilson 	struct file *file;
10564748aa16SChris Wilson 
10574748aa16SChris Wilson 	priv = drm_file_alloc(minor);
10584748aa16SChris Wilson 	if (IS_ERR(priv))
10594748aa16SChris Wilson 		return ERR_CAST(priv);
10604748aa16SChris Wilson 
10614748aa16SChris Wilson 	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
10624748aa16SChris Wilson 	if (IS_ERR(file)) {
10634748aa16SChris Wilson 		drm_file_free(priv);
10644748aa16SChris Wilson 		return file;
10654748aa16SChris Wilson 	}
10664748aa16SChris Wilson 
10674748aa16SChris Wilson 	/* Everyone shares a single global address space */
10684748aa16SChris Wilson 	file->f_mapping = dev->anon_inode->i_mapping;
10694748aa16SChris Wilson 
10704748aa16SChris Wilson 	drm_dev_get(dev);
10714748aa16SChris Wilson 	priv->filp = file;
10724748aa16SChris Wilson 
10734748aa16SChris Wilson 	return file;
10744748aa16SChris Wilson }
10754748aa16SChris Wilson EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
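
/*
 * A rough usage sketch from test code (illustrative only):
 *
 *     struct file *file = mock_drm_getfile(dev->primary, O_RDWR);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *
 *     // file->private_data is the struct drm_file for the new context;
 *     // exercise it as needed, then drop the reference:
 *     fput(file);
 */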
1076