/* drivers/gpu/drm/drm_drv.c (openbmc/linux, revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732) */
/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"

unsigned int drm_debug = 0;	/* bitmask of DRM_UT_* debug categories */
EXPORT_SYMBOL(drm_debug);

bool drm_atomic = false;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

struct class *drm_class;
static struct dentry *drm_debugfs_root;

/**
 * drm_err - log an error to the kernel log
 * @format: printf()-style format string
 *
 * The message is prefixed with "[drm:<caller>] *ERROR*", where the caller is
 * resolved from the return address via %ps. Normally used through the
 * DRM_ERROR() and related macros rather than called directly.
 */
void drm_err(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
	       __builtin_return_address(0), &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_err);

/**
 * drm_ut_debug_printk - log a debug message to the kernel log
 * @function_name: name of the calling function, printed as the message prefix
 * @format: printf()-style format string
 *
 * Backend for the DRM_DEBUG*() macros, which only call this when the
 * corresponding category bit in the drm_debug module parameter is set.
 */
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_ut_debug_printk);
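
/*
 * Example (illustrative sketch, not part of this file): a driver-private
 * debug macro built on the helper above.  The DRM_UT_DRIVER category bit
 * and the drm_ut_debug_printk() signature come from drmP.h; the macro name
 * is made up for the example.
 *
 *	#define MY_DRV_DEBUG(fmt, args...)				\
 *		do {							\
 *			if (unlikely(drm_debug & DRM_UT_DRIVER))	\
 *				drm_ut_debug_printk(__func__, fmt, ##args); \
 *		} while (0)
 *
 * Debug categories can be enabled at runtime, e.g. by writing a bitmask to
 * /sys/module/drm/parameters/debug.
 */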

/* Allocate and initialize a new master object for @minor; the caller holds
 * the only reference. */
struct drm_master *drm_master_create(struct drm_minor *minor)
{
	struct drm_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	kref_init(&master->refcount);
	spin_lock_init(&master->lock.spinlock);
	init_waitqueue_head(&master->lock.lock_queue);
	idr_init(&master->magic_map);
	master->minor = minor;

	return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	idr_destroy(&master->magic_map);
	kfree(master->unique);
	kfree(master);
}

/* Drop a reference to *@master and clear the caller's pointer. */
void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
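
/*
 * Example (illustrative sketch): the intended get/put pattern.  A caller
 * that wants to keep a master object around takes its own reference and
 * drops it through drm_master_put(), which also clears the local pointer:
 *
 *	struct drm_master *master;
 *
 *	master = drm_master_get(file_priv->master);
 *	... use master ...
 *	drm_master_put(&master);	// master is NULL afterwards
 */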

/*
 * drm_setmaster_ioctl - become the DRM master for this file
 *
 * Fails with -EINVAL if the minor already has a master or if the caller never
 * had a master object attached; succeeds trivially if the caller is already
 * master.
 */
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	if (file_priv->is_master)
		goto out_unlock;

	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

/*
 * drm_dropmaster_ioctl - give up DRM master status
 *
 * Only the current master may drop master; otherwise -EINVAL is returned.
 */
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	if (!file_priv->is_master)
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
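
/*
 * Example (illustrative sketch): because the per-type pointers are either
 * NULL or valid for the whole device life-time, a driver can simply test
 * them, e.g. to report whether a render node was allocated:
 *
 *	if (dev->render)
 *		DRM_DEBUG("render node uses minor index %d\n",
 *			  dev->render->index);
 */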

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_LEGACY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/* each minor type gets its own 64-wide window of minor numbers */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}
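
/*
 * Example (illustrative sketch): the usual acquire/use/release pattern, as
 * also used by drm_stub_open() below:
 *
 *	struct drm_minor *minor;
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor->dev, which is guaranteed to stay valid ...
 *	drm_minor_release(minor);
 */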

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Use of this function is discouraged. It will eventually go away completely.
 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_unplug_dev - Unregister a device that was hot-unplugged
 * @dev: DRM device
 *
 * Unregisters the minors and marks the device as unplugged. If no file handles
 * are open any more, the device is released immediately; otherwise the final
 * drm_release() drops it.
 */
void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
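
/*
 * Example (illustrative sketch, hypothetical driver): a USB DRM driver would
 * typically call this from its disconnect callback:
 *
 *	static void my_usb_disconnect(struct usb_interface *interface)
 *	{
 *		struct drm_device *dev = usb_get_intfdata(interface);
 *
 *		drm_unplug_dev(dev);
 *	}
 *
 * The function name is made up; usb_get_intfdata() is the usual way such
 * drivers retrieve the drm_device they stashed at probe time.
 */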

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */
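
/*
 * Example (illustrative sketch): the pairing rule in code form, matching how
 * drm_dev_alloc() and drm_dev_release() below use these helpers:
 *
 *	struct inode *inode;
 *
 *	inode = drm_fs_inode_new();
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... use inode->i_mapping as the shared address_space ...
 *	drm_fs_inode_free(inode);
 */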

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register
 * it with other core subsystems.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * RETURNS:
 * Pointer to new DRM device, or NULL if out of memory.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);
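
/*
 * Example (illustrative sketch, hypothetical helper): keeping a device alive
 * across deferred work.  The extra reference pins the memory, not the
 * "alive" state, so the worker still has to cope with an unplugged device:
 *
 *	drm_dev_ref(dev);		// caller already holds a reference
 *	my->work_dev = dev;		// hypothetical bookkeeping
 *	schedule_work(&my->work);
 *
 *	// ... and in the work handler, when done with the device:
 *	drm_dev_unref(dev);
 */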

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise the device to
 * user-space and start normal device operation. @dev must be allocated via
 * drm_dev_alloc() previously.
 *
 * Never call this twice on any device!
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	ret = 0;
	goto out_unlock;

err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
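
/*
 * Example (illustrative sketch, hypothetical platform driver): the usual
 * allocate-then-register probe sequence.  Names like my_driver and my_probe
 * are placeholders:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm;
 *		int ret;
 *
 *		drm = drm_dev_alloc(&my_driver, &pdev->dev);
 *		if (!drm)
 *			return -ENOMEM;
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret) {
 *			drm_dev_unref(drm);
 *			return ret;
 *		}
 *		return 0;
 *	}
 */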

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);
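
/*
 * Example (illustrative sketch, continuing the hypothetical driver above):
 * the matching remove path simply reverses the probe sequence:
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_dev_unref(drm);
 *		return 0;
 *	}
 *
 * This assumes probe stored the device with platform_set_drvdata(), which is
 * not shown above.
 */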

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @fmt: format string for unique name
 *
 * Sets the unique name of a DRM device using the specified format string and
 * a variable list of arguments. Drivers can use this at driver probe time if
 * the unique name of the devices they drive is static.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
	va_list ap;

	kfree(dev->unique);

	va_start(ap, fmt);
	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
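
/*
 * Example (illustrative sketch): a driver with a fixed underlying struct
 * device can derive the unique name from it at probe time:
 *
 *	ret = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
 *	if (ret)
 *		goto err_unref;	// hypothetical error label
 */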

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static int __init drm_core_init(void)
{
	int ret = -ENOMEM;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
		goto err_p1;

	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
	if (IS_ERR(drm_class)) {
		printk(KERN_ERR "DRM: Error creating drm class.\n");
		ret = PTR_ERR(drm_class);
		goto err_p2;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
		ret = -1;
		goto err_p3;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
err_p3:
	drm_sysfs_destroy();
err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
err_p1:
	return ret;
}

static void __exit drm_core_exit(void)
{
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	drm_connector_ida_destroy();
	idr_destroy(&drm_minors_idr);
}

module_init(drm_core_init);
module_exit(drm_core_exit);