xref: /openbmc/linux/drivers/gpu/drm/drm_drv.c (revision 4bce6fce)
1 /*
2  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3  *
4  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5  * All Rights Reserved.
6  *
7  * Author Rickard E. (Rik) Faith <faith@valinux.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/debugfs.h>
30 #include <linux/fs.h>
31 #include <linux/module.h>
32 #include <linux/moduleparam.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_core.h>
37 #include "drm_legacy.h"
38 #include "drm_internal.h"
39 
unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

/* Opt-in switch for the experimental atomic KMS API ("atomic" modparam). */
bool drm_atomic = 0;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(atomic, "Enable experimental atomic KMS API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
/* _unsafe variant: toggling this experimental knob taints the kernel. */
module_param_named_unsafe(atomic, drm_atomic, bool, 0600);

/* Protects lookups/updates in drm_minors_idr (irq-safe spinlock). */
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

struct class *drm_class;
static struct dentry *drm_debugfs_root;
62 
/**
 * drm_err - log a DRM error message tagged with the caller's symbol
 * @format: printf()-style format string
 *
 * Emits a KERN_ERR message of the form "[drm:<caller>] *ERROR* ...".
 * The caller is resolved from __builtin_return_address(0) via the %ps
 * printk extension, and the variadic arguments are forwarded through a
 * struct va_format (%pV) so the format string is expanded only once,
 * inside printk() itself.
 */
void drm_err(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
	       __builtin_return_address(0), &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_err);
79 
/**
 * drm_ut_debug_printk - backend for the DRM_DEBUG* macros
 * @function_name: name of the calling function, used as message prefix
 * @format: printf()-style format string
 *
 * Emits a KERN_DEBUG message of the form "[drm:<function>] ...".  The
 * variadic arguments are passed via %pV so printk() expands them once.
 */
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);
94 
/* log2 of the per-master auth-magic hash table size (2^4 = 16 buckets). */
#define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
96 
97 struct drm_master *drm_master_create(struct drm_minor *minor)
98 {
99 	struct drm_master *master;
100 
101 	master = kzalloc(sizeof(*master), GFP_KERNEL);
102 	if (!master)
103 		return NULL;
104 
105 	kref_init(&master->refcount);
106 	spin_lock_init(&master->lock.spinlock);
107 	init_waitqueue_head(&master->lock.lock_queue);
108 	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
109 		kfree(master);
110 		return NULL;
111 	}
112 	INIT_LIST_HEAD(&master->magicfree);
113 	master->minor = minor;
114 
115 	return master;
116 }
117 
/**
 * drm_master_get - take a reference on a master object
 * @master: master to reference; the caller must already hold one
 *
 * Returns @master so the call can be chained into assignments.
 */
struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);
124 
125 static void drm_master_destroy(struct kref *kref)
126 {
127 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
128 	struct drm_device *dev = master->minor->dev;
129 	struct drm_map_list *r_list, *list_temp;
130 
131 	mutex_lock(&dev->struct_mutex);
132 	if (dev->driver->master_destroy)
133 		dev->driver->master_destroy(dev, master);
134 
135 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
136 		if (r_list->master == master) {
137 			drm_legacy_rmmap_locked(dev, r_list->map);
138 			r_list = NULL;
139 		}
140 	}
141 
142 	if (master->unique) {
143 		kfree(master->unique);
144 		master->unique = NULL;
145 		master->unique_len = 0;
146 	}
147 
148 	drm_ht_remove(&master->magiclist);
149 
150 	mutex_unlock(&dev->struct_mutex);
151 	kfree(master);
152 }
153 
154 void drm_master_put(struct drm_master **master)
155 {
156 	kref_put(&(*master)->refcount, drm_master_destroy);
157 	*master = NULL;
158 }
159 EXPORT_SYMBOL(drm_master_put);
160 
/*
 * SET_MASTER ioctl: make the calling file the master of its minor.
 *
 * Serialized by dev->master_mutex.  Succeeds trivially when the caller
 * is already master; fails with -EINVAL when the minor already has a
 * (different) master or the caller has no master object to promote.
 */
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	/* Already master: nothing to do. */
	if (file_priv->is_master)
		goto out_unlock;

	/* Someone else currently holds mastership on this minor. */
	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Caller has no master object of its own to install. */
	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		/* Driver veto: roll back the mastership assignment. */
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}
194 
195 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
196 			 struct drm_file *file_priv)
197 {
198 	int ret = -EINVAL;
199 
200 	mutex_lock(&dev->master_mutex);
201 	if (!file_priv->is_master)
202 		goto out_unlock;
203 
204 	if (!file_priv->minor->master)
205 		goto out_unlock;
206 
207 	ret = 0;
208 	if (dev->driver->master_drop)
209 		dev->driver->master_drop(dev, file_priv, false);
210 	drm_master_put(&file_priv->minor->master);
211 	file_priv->is_master = 0;
212 
213 out_unlock:
214 	mutex_unlock(&dev->master_mutex);
215 	return ret;
216 }
217 
218 /*
219  * DRM Minors
220  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
221  * of them is represented by a drm_minor object. Depending on the capabilities
222  * of the device-driver, different interfaces are registered.
223  *
224  * Minors can be accessed via dev->$minor_name. This pointer is either
225  * NULL or a valid drm_minor pointer and stays valid as long as the device is
226  * valid. This means, DRM minors have the same life-time as the underlying
227  * device. However, this doesn't mean that the minor is active. Minors are
228  * registered and unregistered dynamically according to device-state.
229  */
230 
231 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
232 					     unsigned int type)
233 {
234 	switch (type) {
235 	case DRM_MINOR_LEGACY:
236 		return &dev->primary;
237 	case DRM_MINOR_RENDER:
238 		return &dev->render;
239 	case DRM_MINOR_CONTROL:
240 		return &dev->control;
241 	default:
242 		return NULL;
243 	}
244 }
245 
/*
 * Allocate a drm_minor of @type for @dev: reserve a minor number in
 * drm_minors_idr (the slot stays NULL until drm_minor_register()
 * publishes it), allocate the matching sysfs device, and store the
 * result in the dev->{primary,render,control} slot for @type.
 * Returns 0 or a negative error code.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/*
	 * Each minor type owns a disjoint range of 64 minor numbers.
	 * Preload outside the lock so the idr_alloc() itself can run
	 * under the irq-safe spinlock with GFP_NOWAIT.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}
291 
292 static void drm_minor_free(struct drm_device *dev, unsigned int type)
293 {
294 	struct drm_minor **slot, *minor;
295 	unsigned long flags;
296 
297 	slot = drm_minor_get_slot(dev, type);
298 	minor = *slot;
299 	if (!minor)
300 		return;
301 
302 	drm_mode_group_destroy(&minor->mode_group);
303 	put_device(minor->kdev);
304 
305 	spin_lock_irqsave(&drm_minor_lock, flags);
306 	idr_remove(&drm_minors_idr, minor->index);
307 	spin_unlock_irqrestore(&drm_minor_lock, flags);
308 
309 	kfree(minor);
310 	*slot = NULL;
311 }
312 
/*
 * Activate an allocated minor: create its debugfs directory, add the
 * char-dev/sysfs device, and finally publish it in drm_minors_idr so
 * drm_minor_acquire() lookups succeed.  A minor that was never
 * allocated (feature not supported by the driver) is not an error.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}
347 
/*
 * Deactivate a registered minor.  The idr entry is reset to NULL
 * *before* the device is removed so that new lookups fail first; only
 * then are the sysfs device and debugfs entries torn down.  Safe to
 * call on minors that were never allocated or never registered.
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
366 
367 /**
368  * drm_minor_acquire - Acquire a DRM minor
369  * @minor_id: Minor ID of the DRM-minor
370  *
371  * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
373  * object with drm_minor_release().
374  *
375  * As long as you hold this minor, it is guaranteed that the object and the
376  * minor->dev pointer will stay valid! However, the device may get unplugged and
377  * unregistered while you hold the minor.
378  *
379  * Returns:
380  * Pointer to minor-object with increased device-refcount, or PTR_ERR on
381  * failure.
382  */
383 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
384 {
385 	struct drm_minor *minor;
386 	unsigned long flags;
387 
388 	spin_lock_irqsave(&drm_minor_lock, flags);
389 	minor = idr_find(&drm_minors_idr, minor_id);
390 	if (minor)
391 		drm_dev_ref(minor->dev);
392 	spin_unlock_irqrestore(&drm_minor_lock, flags);
393 
394 	if (!minor) {
395 		return ERR_PTR(-ENODEV);
396 	} else if (drm_device_is_unplugged(minor->dev)) {
397 		drm_dev_unref(minor->dev);
398 		return ERR_PTR(-ENODEV);
399 	}
400 
401 	return minor;
402 }
403 
404 /**
405  * drm_minor_release - Release DRM minor
406  * @minor: Pointer to DRM minor object
407  *
408  * Release a minor that was previously acquired via drm_minor_acquire().
409  */
void drm_minor_release(struct drm_minor *minor)
{
	/* Drops the device reference taken in drm_minor_acquire(). */
	drm_dev_unref(minor->dev);
}
414 
415 /**
416  * drm_put_dev - Unregister and release a DRM device
417  * @dev: DRM device
418  *
419  * Called at module unload time or when a PCI device is unplugged.
420  *
421  * Use of this function is discouraged. It will eventually go away completely.
422  * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
423  *
424  * Cleans up all DRM device, calling drm_lastclose().
425  */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	/* Tolerate (but complain about) a NULL device. */
	if (dev) {
		drm_dev_unregister(dev);
		drm_dev_unref(dev);
	} else {
		DRM_ERROR("cleanup called no dev\n");
	}
}
EXPORT_SYMBOL(drm_put_dev);
439 
/**
 * drm_unplug_dev - handle hot-unplug of a device (e.g. USB)
 * @dev: DRM device that disappeared
 *
 * Unregisters all minors immediately so new opens fail, marks the
 * device unplugged under drm_global_mutex and, if no file handles are
 * open any more, drops the final reference via drm_put_dev().  With
 * handles still open, final cleanup is presumably deferred to the
 * release path once open_count drops to zero (not visible here).
 */
void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
457 
458 /*
459  * DRM internal mount
460  * We want to be able to allocate our own "struct address_space" to control
461  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
462  * stand-alone address_space objects, so we need an underlying inode. As there
463  * is no way to allocate an independent inode easily, we need a fake internal
464  * VFS mount-point.
465  *
466  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
467  * frees it again. You are allowed to use iget() and iput() to get references to
468  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
469  * drm_fs_inode_free() call (which does not have to be the last iput()).
470  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
471  * between multiple inode-users. You could, technically, call
472  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
473  * iput(), but this way you'd end up with a new vfsmount for each inode.
474  */
475 
/* Pin count and mount for the internal "drm" pseudo fs (see comment above). */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,	/* readable names for anonymous dentries */
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};
486 
/*
 * .mount callback for the internal "drm" pseudo filesystem.  It is
 * never user-mountable; it is only pinned via simple_pin_fs() from
 * drm_fs_inode_new().  The last argument is the super-block magic
 * number identifying this pseudo fs.
 */
static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}
496 
/* Internal-only filesystem type backing drm_fs_inode_new(). */
static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};
503 
504 static struct inode *drm_fs_inode_new(void)
505 {
506 	struct inode *inode;
507 	int r;
508 
509 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
510 	if (r < 0) {
511 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
512 		return ERR_PTR(r);
513 	}
514 
515 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
516 	if (IS_ERR(inode))
517 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
518 
519 	return inode;
520 }
521 
522 static void drm_fs_inode_free(struct inode *inode)
523 {
524 	if (inode) {
525 		iput(inode);
526 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
527 	}
528 }
529 
530 /**
531  * drm_dev_alloc - Allocate new DRM device
532  * @driver: DRM driver to allocate device for
533  * @parent: Parent device object
534  *
535  * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
537  * with other core subsystems.
538  *
539  * The initial ref-count of the object is 1. Use drm_dev_ref() and
540  * drm_dev_unref() to take and drop further ref-counts.
541  *
542  * Note that for purely virtual devices @parent can be NULL.
543  *
544  * RETURNS:
545  * Pointer to new DRM device, or NULL if out of memory.
546  */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	/* Anonymous inode from the internal drm mount, used as the
	 * device's private address_space backing. */
	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	/* Control/render minors only exist for the matching driver features;
	 * the legacy/primary minor is unconditional. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	/* NOTE(review): ret is not updated on this failure path, but
	 * that is harmless here — failure is reported by returning
	 * NULL, and the stale ret value is never read. */
	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_legacy_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

	/* Unwind in reverse order of construction; drm_minor_free() and
	 * drm_fs_inode_free() tolerate never-allocated members. */
err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
630 
/*
 * Final kref release for a drm_device: undoes drm_dev_alloc() in
 * reverse order of construction and frees the device.  Invoked via
 * drm_dev_unref() when the ref-count drops to zero.
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}
650 
651 /**
652  * drm_dev_ref - Take reference of a DRM device
653  * @dev: device to take reference of or NULL
654  *
655  * This increases the ref-count of @dev by one. You *must* already own a
656  * reference when calling this. Use drm_dev_unref() to drop this reference
657  * again.
658  *
659  * This function never fails. However, this function does not provide *any*
660  * guarantee whether the device is alive or running. It only provides a
661  * reference to the object and the memory associated with it.
662  */
void drm_dev_ref(struct drm_device *dev)
{
	/* NULL is tolerated so callers need not check first. */
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);
669 
670 /**
671  * drm_dev_unref - Drop reference of a DRM device
672  * @dev: device to drop reference of or NULL
673  *
674  * This decreases the ref-count of @dev by one. The device is destroyed if the
675  * ref-count drops to zero.
676  */
void drm_dev_unref(struct drm_device *dev)
{
	/* NULL is tolerated; the last unref frees via drm_dev_release(). */
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);
683 
684 /**
685  * drm_dev_register - Register DRM device
686  * @dev: Device to register
687  * @flags: Flags passed to the driver's .load() function
688  *
689  * Register the DRM device @dev with the system, advertise device to user-space
690  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
691  * previously.
692  *
693  * Never call this twice on any device!
694  *
695  * RETURNS:
696  * 0 on success, negative error code on failure.
697  */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	/* Register all allocated minors; never-allocated ones are
	 * skipped inside drm_minor_register(). */
	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	/* Driver-specific initialization, after minors are visible. */
	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
745 
746 /**
747  * drm_dev_unregister - Unregister DRM device
748  * @dev: Device to unregister
749  *
750  * Unregister the DRM device from the system. This does the reverse of
751  * drm_dev_register() but does not deallocate the device. The caller must call
752  * drm_dev_unref() to drop their final reference.
753  */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	/* Driver teardown first, then core resources it may still use. */
	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	/* Remove any legacy maps still registered on the device. */
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
776 
777 /**
778  * drm_dev_set_unique - Set the unique name of a DRM device
779  * @dev: device of which to set the unique name
780  * @fmt: format string for unique name
781  *
782  * Sets the unique name of a DRM device using the specified format string and
783  * a variable list of arguments. Drivers can use this at driver probe time if
784  * the unique name of the devices they drive is static.
785  *
786  * Return: 0 on success or a negative error code on failure.
787  */
788 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
789 {
790 	va_list ap;
791 
792 	kfree(dev->unique);
793 
794 	va_start(ap, fmt);
795 	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
796 	va_end(ap);
797 
798 	return dev->unique ? 0 : -ENOMEM;
799 }
800 EXPORT_SYMBOL(drm_dev_set_unique);
801 
802 /*
803  * DRM Core
804  * The DRM core module initializes all global DRM objects and makes them
805  * available to drivers. Once setup, drivers can probe their respective
806  * devices.
807  * Currently, core management includes:
808  *  - The "DRM-Global" key/value database
809  *  - Global ID management for connectors
810  *  - DRM major number allocation
811  *  - DRM minor management
812  *  - DRM sysfs class
813  *  - DRM debugfs root
814  *
815  * Furthermore, the DRM core provides dynamic char-dev lookups. For each
816  * interface registered on a DRM device, you can request minor numbers from DRM
817  * core. DRM core takes care of major-number management and char-dev
818  * registration. A stub ->open() callback forwards any open() requests to the
819  * registered minor.
820  */
821 
/*
 * open() handler installed on the DRM major.  Resolves the inode's
 * minor number to its drm_minor, substitutes the owning driver's real
 * file_operations into @filp via replace_fops(), and re-dispatches the
 * open() to them so all later file ops go straight to the driver.
 */
static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	/* From here on the driver's fops own this file. */
	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}
855 
/*
 * Stub file operations registered for the whole DRM major; the open
 * hook forwards to the owning driver's fops (see drm_stub_open()).
 */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};
861 
862 static int __init drm_core_init(void)
863 {
864 	int ret = -ENOMEM;
865 
866 	drm_global_init();
867 	drm_connector_ida_init();
868 	idr_init(&drm_minors_idr);
869 
870 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
871 		goto err_p1;
872 
873 	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
874 	if (IS_ERR(drm_class)) {
875 		printk(KERN_ERR "DRM: Error creating drm class.\n");
876 		ret = PTR_ERR(drm_class);
877 		goto err_p2;
878 	}
879 
880 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
881 	if (!drm_debugfs_root) {
882 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
883 		ret = -1;
884 		goto err_p3;
885 	}
886 
887 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
888 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
889 	return 0;
890 err_p3:
891 	drm_sysfs_destroy();
892 err_p2:
893 	unregister_chrdev(DRM_MAJOR, "drm");
894 
895 	idr_destroy(&drm_minors_idr);
896 err_p1:
897 	return ret;
898 }
899 
/* Module unload: tear down globals in reverse order of drm_core_init(). */
static void __exit drm_core_exit(void)
{
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_connector_ida_destroy();
	idr_destroy(&drm_minors_idr);
}
910 
911 module_init(drm_core_init);
912 module_exit(drm_core_exit);
913