xref: /openbmc/linux/drivers/gpu/drm/drm_drv.c (revision 4a44a19b)
/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"

unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);

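/*
 * Usage note (illustrative, not part of the original file): because the
 * "debug" parameter is registered with mode 0600, root can raise the debug
 * mask at runtime without reloading the module, e.g.:
 *
 *	echo 0x04 > /sys/module/drm/parameters/debug
 *
 * or at load time with "modprobe drm debug=0x04" (or "drm.debug=0x04" on the
 * kernel command line when DRM is built in). The individual mask bits are the
 * DRM_UT_* constants from drmP.h; which value selects which category (core,
 * driver, KMS, prime) is an assumption here, check drmP.h for the
 * authoritative list.
 */
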
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

struct class *drm_class;
static struct dentry *drm_debugfs_root;

void drm_err(const char *func, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_err);

void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);

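/*
 * Both printers above are normally reached through the logging macros in
 * drmP.h rather than called directly. A minimal sketch of how such a macro
 * forwards to drm_ut_debug_printk() (the exact definition lives in drmP.h;
 * treat this as an approximation, not the authoritative form):
 *
 *	#define DRM_DEBUG(fmt, ...)					\
 *		do {							\
 *			if (unlikely(drm_debug & DRM_UT_CORE))		\
 *				drm_ut_debug_printk(__func__, fmt,	\
 *						    ##__VA_ARGS__);	\
 *		} while (0)
 *
 * The __func__ argument is what produces the "[drm:<function>]" prefix seen
 * in the printk formats above.
 */
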
#define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be a power of 2. */

struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
		kfree(master);
		return NULL;
	}
	INIT_LIST_HEAD(&master->magicfree);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}

void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);

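/*
 * A minimal reference-counting sketch for drm_master_get()/drm_master_put()
 * (variable names are illustrative, not taken from this file): every
 * drm_master_get() must be balanced by one drm_master_put(), and the put
 * helper also clears the caller's pointer so stale references are obvious:
 *
 *	struct drm_master *m = drm_master_get(file_priv->master);
 *	...
 *	drm_master_put(&m);	/* last put frees via drm_master_destroy() */
 */
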
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

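/*
 * User-space reaches the two ioctls above through libdrm's drmSetMaster() and
 * drmDropMaster() wrappers, which issue DRM_IOCTL_SET_MASTER and
 * DRM_IOCTL_DROP_MASTER on an open card file descriptor. Sketch (the wrapper
 * names come from libdrm, not from this file):
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drmSetMaster(fd);	/* become DRM master, e.g. for modesetting */
 *	...
 *	drmDropMaster(fd);	/* give up master status */
 */
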
/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same lifetime as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}

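/*
 * Note on the idr range above (illustrative; the numeric enum values are an
 * assumption based on drmP.h of this era, where DRM_MINOR_LEGACY,
 * DRM_MINOR_CONTROL and DRM_MINOR_RENDER enumerate to 0, 1 and 2): each minor
 * type gets a 64-wide index window, which is what produces the familiar
 * device-node numbering:
 *
 *	legacy/primary:	index   0..63	-> /dev/dri/card%d
 *	control:	index  64..127	-> /dev/dri/controlD%d
 *	render:		index 128..191	-> /dev/dri/renderD%d
 */
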
static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	drm_mode_group_destroy(&minor->mode_group);
	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}

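/*
 * Typical lookup pattern built on the pair above, as used by drm_stub_open()
 * later in this file (sketch only; error handling is abbreviated):
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor->dev ...
 *	drm_minor_release(minor);
 */
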
/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Use of this function is discouraged. It will eventually go away completely.
 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
 *
 * Cleans up the whole DRM device, including a final drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register
 * it with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_legacy_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise the device to
 * user-space and start normal device operation. @dev must be allocated via
 * drm_dev_alloc() previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

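/*
 * A minimal sketch of the intended allocation/registration flow from a
 * hypothetical driver's probe path (the driver name, flags value and error
 * handling details are assumptions, not taken from this file):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct drm_device *ddev;
 *		int ret;
 *
 *		ddev = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
 *		if (!ddev)
 *			return -ENOMEM;
 *
 *		ret = drm_dev_register(ddev, 0);
 *		if (ret)
 *			drm_dev_unref(ddev);
 *		return ret;
 *	}
 *
 * Teardown is the mirror image: drm_dev_unregister() followed by
 * drm_dev_unref(), which is exactly what drm_put_dev() wraps.
 */
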
/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @fmt: format string for unique name
 *
 * Sets the unique name of a DRM device using the specified format string and
 * a variable list of arguments. Drivers can use this at driver probe time if
 * the unique name of the devices they drive is static.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
	va_list ap;

	kfree(dev->unique);

	va_start(ap, fmt);
	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);

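/*
 * Example call (illustrative; the platform_device context is an assumption,
 * not something this file requires):
 *
 *	drm_dev_set_unique(ddev, "%s", dev_name(&pdev->dev));
 *
 * Note that the previous unique name is freed before the new one is
 * allocated, so on -ENOMEM the device is left without a unique name.
 */
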
/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static int __init drm_core_init(void)
{
	int ret = -ENOMEM;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
		goto err_p1;

	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
	if (IS_ERR(drm_class)) {
		printk(KERN_ERR "DRM: Error creating drm class.\n");
		ret = PTR_ERR(drm_class);
		goto err_p2;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
		ret = -1;
		goto err_p3;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
err_p3:
	drm_sysfs_destroy();
err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
err_p1:
	return ret;
}

static void __exit drm_core_exit(void)
{
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_connector_ida_destroy();
	idr_destroy(&drm_minors_idr);
}

module_init(drm_core_init);
module_exit(drm_core_exit);