1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015-2018 Etnaviv Project
4  */
5 
6 #include <linux/component.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/module.h>
9 #include <linux/of.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 #include <linux/uaccess.h>
13 
14 #include <drm/drm_debugfs.h>
15 #include <drm/drm_drv.h>
16 #include <drm/drm_file.h>
17 #include <drm/drm_ioctl.h>
18 #include <drm/drm_of.h>
19 #include <drm/drm_prime.h>
20 
21 #include "etnaviv_cmdbuf.h"
22 #include "etnaviv_drv.h"
23 #include "etnaviv_gpu.h"
24 #include "etnaviv_gem.h"
25 #include "etnaviv_mmu.h"
26 #include "etnaviv_perfmon.h"
27 
28 /*
29  * DRM operations:
30  */
31 
32 
33 static void load_gpu(struct drm_device *dev)
34 {
35 	struct etnaviv_drm_private *priv = dev->dev_private;
36 	unsigned int i;
37 
38 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
39 		struct etnaviv_gpu *g = priv->gpu[i];
40 
41 		if (g) {
42 			int ret;
43 
44 			ret = etnaviv_gpu_init(g);
45 			if (ret)
46 				priv->gpu[i] = NULL;
47 		}
48 	}
49 }
50 
51 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
52 {
53 	struct etnaviv_drm_private *priv = dev->dev_private;
54 	struct etnaviv_file_private *ctx;
55 	int ret, i;
56 
57 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
58 	if (!ctx)
59 		return -ENOMEM;
60 
61 	ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
62 			      xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
63 	if (ret < 0)
64 		goto out_free;
65 
66 	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
67 					      priv->cmdbuf_suballoc);
68 	if (!ctx->mmu) {
69 		ret = -ENOMEM;
70 		goto out_free;
71 	}
72 
73 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
74 		struct etnaviv_gpu *gpu = priv->gpu[i];
75 		struct drm_gpu_scheduler *sched;
76 
77 		if (gpu) {
78 			sched = &gpu->sched;
79 			drm_sched_entity_init(&ctx->sched_entity[i],
80 					      DRM_SCHED_PRIORITY_NORMAL, &sched,
81 					      1, NULL);
82 			}
83 	}
84 
85 	file->driver_priv = ctx;
86 
87 	return 0;
88 
89 out_free:
90 	kfree(ctx);
91 	return ret;
92 }
93 
94 static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
95 {
96 	struct etnaviv_drm_private *priv = dev->dev_private;
97 	struct etnaviv_file_private *ctx = file->driver_priv;
98 	unsigned int i;
99 
100 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
101 		struct etnaviv_gpu *gpu = priv->gpu[i];
102 
103 		if (gpu)
104 			drm_sched_entity_destroy(&ctx->sched_entity[i]);
105 	}
106 
107 	etnaviv_iommu_context_put(ctx->mmu);
108 
109 	xa_erase(&priv->active_contexts, ctx->id);
110 
111 	kfree(ctx);
112 }
113 
114 /*
115  * DRM debugfs:
116  */
117 
118 #ifdef CONFIG_DEBUG_FS
119 static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
120 {
121 	struct etnaviv_drm_private *priv = dev->dev_private;
122 
123 	etnaviv_gem_describe_objects(priv, m);
124 
125 	return 0;
126 }
127 
128 static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
129 {
130 	struct drm_printer p = drm_seq_file_printer(m);
131 
132 	read_lock(&dev->vma_offset_manager->vm_lock);
133 	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
134 	read_unlock(&dev->vma_offset_manager->vm_lock);
135 
136 	return 0;
137 }
138 
/*
 * debugfs: dump the MMU address space of the context currently active
 * on @gpu, if there is one. Always returns 0.
 */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to avoid a MMU context switch just now and elevate
	 * the refcount of the current context to avoid it disappearing from
	 * under our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	/* No context bound: nothing to dump. */
	if (!mmu_context)
		return 0;

	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	/* Drop the reference taken above. */
	etnaviv_iommu_context_put(mmu_context);

	return 0;
}
168 
169 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
170 {
171 	struct etnaviv_cmdbuf *buf = &gpu->buffer;
172 	u32 size = buf->size;
173 	u32 *ptr = buf->vaddr;
174 	u32 i;
175 
176 	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
177 			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
178 			size - buf->user_size);
179 
180 	for (i = 0; i < size / 4; i++) {
181 		if (i && !(i % 4))
182 			seq_puts(m, "\n");
183 		if (i % 4 == 0)
184 			seq_printf(m, "\t0x%p: ", ptr + i);
185 		seq_printf(m, "%08x ", *(ptr + i));
186 	}
187 	seq_puts(m, "\n");
188 }
189 
190 static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
191 {
192 	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
193 
194 	mutex_lock(&gpu->lock);
195 	etnaviv_buffer_dump(gpu, m);
196 	mutex_unlock(&gpu->lock);
197 
198 	return 0;
199 }
200 
201 static int show_unlocked(struct seq_file *m, void *arg)
202 {
203 	struct drm_info_node *node = (struct drm_info_node *) m->private;
204 	struct drm_device *dev = node->minor->dev;
205 	int (*show)(struct drm_device *dev, struct seq_file *m) =
206 			node->info_ent->data;
207 
208 	return show(dev, m);
209 }
210 
211 static int show_each_gpu(struct seq_file *m, void *arg)
212 {
213 	struct drm_info_node *node = (struct drm_info_node *) m->private;
214 	struct drm_device *dev = node->minor->dev;
215 	struct etnaviv_drm_private *priv = dev->dev_private;
216 	struct etnaviv_gpu *gpu;
217 	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
218 			node->info_ent->data;
219 	unsigned int i;
220 	int ret = 0;
221 
222 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
223 		gpu = priv->gpu[i];
224 		if (!gpu)
225 			continue;
226 
227 		ret = show(gpu, m);
228 		if (ret < 0)
229 			break;
230 	}
231 
232 	return ret;
233 }
234 
235 static struct drm_info_list etnaviv_debugfs_list[] = {
236 		{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
237 		{"gem", show_unlocked, 0, etnaviv_gem_show},
238 		{ "mm", show_unlocked, 0, etnaviv_mm_show },
239 		{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
240 		{"ring", show_each_gpu, 0, etnaviv_ring_show},
241 };
242 
/* Register the etnaviv debugfs files on the given DRM minor. */
static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
249 #endif
250 
251 /*
252  * DRM ioctls:
253  */
254 
255 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
256 		struct drm_file *file)
257 {
258 	struct etnaviv_drm_private *priv = dev->dev_private;
259 	struct drm_etnaviv_param *args = data;
260 	struct etnaviv_gpu *gpu;
261 
262 	if (args->pipe >= ETNA_MAX_PIPES)
263 		return -EINVAL;
264 
265 	gpu = priv->gpu[args->pipe];
266 	if (!gpu)
267 		return -ENXIO;
268 
269 	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
270 }
271 
272 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
273 		struct drm_file *file)
274 {
275 	struct drm_etnaviv_gem_new *args = data;
276 
277 	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
278 			    ETNA_BO_FORCE_MMU))
279 		return -EINVAL;
280 
281 	return etnaviv_gem_new_handle(dev, file, args->size,
282 			args->flags, &args->handle);
283 }
284 
285 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
286 		struct drm_file *file)
287 {
288 	struct drm_etnaviv_gem_cpu_prep *args = data;
289 	struct drm_gem_object *obj;
290 	int ret;
291 
292 	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
293 		return -EINVAL;
294 
295 	obj = drm_gem_object_lookup(file, args->handle);
296 	if (!obj)
297 		return -ENOENT;
298 
299 	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
300 
301 	drm_gem_object_put(obj);
302 
303 	return ret;
304 }
305 
306 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
307 		struct drm_file *file)
308 {
309 	struct drm_etnaviv_gem_cpu_fini *args = data;
310 	struct drm_gem_object *obj;
311 	int ret;
312 
313 	if (args->flags)
314 		return -EINVAL;
315 
316 	obj = drm_gem_object_lookup(file, args->handle);
317 	if (!obj)
318 		return -ENOENT;
319 
320 	ret = etnaviv_gem_cpu_fini(obj);
321 
322 	drm_gem_object_put(obj);
323 
324 	return ret;
325 }
326 
327 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
328 		struct drm_file *file)
329 {
330 	struct drm_etnaviv_gem_info *args = data;
331 	struct drm_gem_object *obj;
332 	int ret;
333 
334 	if (args->pad)
335 		return -EINVAL;
336 
337 	obj = drm_gem_object_lookup(file, args->handle);
338 	if (!obj)
339 		return -ENOENT;
340 
341 	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
342 	drm_gem_object_put(obj);
343 
344 	return ret;
345 }
346 
347 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
348 		struct drm_file *file)
349 {
350 	struct drm_etnaviv_wait_fence *args = data;
351 	struct etnaviv_drm_private *priv = dev->dev_private;
352 	struct drm_etnaviv_timespec *timeout = &args->timeout;
353 	struct etnaviv_gpu *gpu;
354 
355 	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
356 		return -EINVAL;
357 
358 	if (args->pipe >= ETNA_MAX_PIPES)
359 		return -EINVAL;
360 
361 	gpu = priv->gpu[args->pipe];
362 	if (!gpu)
363 		return -ENXIO;
364 
365 	if (args->flags & ETNA_WAIT_NONBLOCK)
366 		timeout = NULL;
367 
368 	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
369 						    timeout);
370 }
371 
372 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
373 	struct drm_file *file)
374 {
375 	struct drm_etnaviv_gem_userptr *args = data;
376 
377 	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
378 	    args->flags == 0)
379 		return -EINVAL;
380 
381 	if (offset_in_page(args->user_ptr | args->user_size) ||
382 	    (uintptr_t)args->user_ptr != args->user_ptr ||
383 	    (u32)args->user_size != args->user_size ||
384 	    args->user_ptr & ~PAGE_MASK)
385 		return -EINVAL;
386 
387 	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
388 		       args->user_size))
389 		return -EFAULT;
390 
391 	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
392 				       args->user_size, args->flags,
393 				       &args->handle);
394 }
395 
396 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
397 	struct drm_file *file)
398 {
399 	struct etnaviv_drm_private *priv = dev->dev_private;
400 	struct drm_etnaviv_gem_wait *args = data;
401 	struct drm_etnaviv_timespec *timeout = &args->timeout;
402 	struct drm_gem_object *obj;
403 	struct etnaviv_gpu *gpu;
404 	int ret;
405 
406 	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
407 		return -EINVAL;
408 
409 	if (args->pipe >= ETNA_MAX_PIPES)
410 		return -EINVAL;
411 
412 	gpu = priv->gpu[args->pipe];
413 	if (!gpu)
414 		return -ENXIO;
415 
416 	obj = drm_gem_object_lookup(file, args->handle);
417 	if (!obj)
418 		return -ENOENT;
419 
420 	if (args->flags & ETNA_WAIT_NONBLOCK)
421 		timeout = NULL;
422 
423 	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
424 
425 	drm_gem_object_put(obj);
426 
427 	return ret;
428 }
429 
430 static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
431 	struct drm_file *file)
432 {
433 	struct etnaviv_drm_private *priv = dev->dev_private;
434 	struct drm_etnaviv_pm_domain *args = data;
435 	struct etnaviv_gpu *gpu;
436 
437 	if (args->pipe >= ETNA_MAX_PIPES)
438 		return -EINVAL;
439 
440 	gpu = priv->gpu[args->pipe];
441 	if (!gpu)
442 		return -ENXIO;
443 
444 	return etnaviv_pm_query_dom(gpu, args);
445 }
446 
447 static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
448 	struct drm_file *file)
449 {
450 	struct etnaviv_drm_private *priv = dev->dev_private;
451 	struct drm_etnaviv_pm_signal *args = data;
452 	struct etnaviv_gpu *gpu;
453 
454 	if (args->pipe >= ETNA_MAX_PIPES)
455 		return -EINVAL;
456 
457 	gpu = priv->gpu[args->pipe];
458 	if (!gpu)
459 		return -ENXIO;
460 
461 	return etnaviv_pm_query_sig(gpu, args);
462 }
463 
/* etnaviv ioctl table; every entry is accessible from render nodes. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
479 
480 DEFINE_DRM_GEM_FOPS(fops);
481 
482 static const struct drm_driver etnaviv_drm_driver = {
483 	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
484 	.open               = etnaviv_open,
485 	.postclose           = etnaviv_postclose,
486 	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
487 #ifdef CONFIG_DEBUG_FS
488 	.debugfs_init       = etnaviv_debugfs_init,
489 #endif
490 	.ioctls             = etnaviv_ioctls,
491 	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
492 	.fops               = &fops,
493 	.name               = "etnaviv",
494 	.desc               = "etnaviv DRM",
495 	.date               = "20151214",
496 	.major              = 1,
497 	.minor              = 4,
498 };
499 
500 /*
501  * Platform driver:
502  */
/*
 * Component master bind: allocate the DRM device and driver private
 * data, set up the cmdbuf suballocator and context registry, bind all
 * GPU core components, initialize the GPUs and register the device.
 * Error paths unwind in strict reverse order of the setup steps.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	/* Allow large contiguous DMA segments for scatterlists. */
	dma_set_max_seg_size(dev, SZ_2G);

	xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(dev)) {
		priv->shm_gfp_mask |= GFP_DMA32;
		priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
	}

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	/* Components may look up the drm device via drvdata during bind. */
	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}
572 
/*
 * Component master unbind: unwind etnaviv_bind() in reverse order.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	/* Cut off userspace first, then tear down the GPU components. */
	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	xa_destroy(&priv->active_contexts);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}
591 
/* Component framework callbacks for the virtual master device. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
596 
/*
 * Probe of the virtual "etnaviv" platform device: build a component
 * match from either DT nodes compatible with "vivante,gc" or a
 * platform-data list of device names, set up the DMA masks and
 * register the component master.
 */
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *first_node = NULL;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			/*
			 * NOTE(review): the iterator drops its reference on
			 * core_node at the next loop pass, so first_node is
			 * kept without a reference of its own; this relies
			 * on DT nodes staying alive — verify for overlays.
			 */
			if (!first_node)
				first_node = core_node;

			drm_of_component_match_add(&pdev->dev, &match,
						   component_compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		/* names is a NULL-terminated array of device names. */
		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, component_compare_dev_name, names[i]);
	}

	/*
	 * PTA and MTLB can have 40 bit base addresses, but
	 * unfortunately, an entry in the MTLB can only point to a
	 * 32 bit base address of a STLB. Moreover, to initialize the
	 * MMU we need a command buffer with a 32 bit address because
	 * without an MMU there is only an identity mapping between
	 * the internal 32 bit addresses and the bus addresses.
	 *
	 * To make things easy, we set the dma_coherent_mask to 32
	 * bit to make sure we are allocating the command buffers and
	 * TLBs in the lower 4 GiB address space.
	 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_dbg(&pdev->dev, "No suitable DMA available\n");
		return -ENODEV;
	}

	/*
	 * Apply the same DMA configuration to the virtual etnaviv
	 * device as the GPU we found. This assumes that all Vivante
	 * GPUs in the system share the same DMA constraints.
	 */
	if (first_node)
		of_dma_configure(&pdev->dev, first_node, true);

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
652 
653 static int etnaviv_pdev_remove(struct platform_device *pdev)
654 {
655 	component_master_del(&pdev->dev, &etnaviv_master_ops);
656 
657 	return 0;
658 }
659 
/* Virtual platform driver tying the GPU core components together. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
	},
};
667 
668 static struct platform_device *etnaviv_drm;
669 
/*
 * Module init: register the GPU core driver and the virtual platform
 * driver, then instantiate the virtual DRM platform device if the DT
 * contains at least one available Vivante GPU node.
 */
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
		if (!pdev) {
			ret = -ENOMEM;
			of_node_put(np);
			goto unregister_platform_driver;
		}

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		/* Drop the iterator's node reference before leaving the loop. */
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);
722 
/* Module exit: tear everything down in reverse order of etnaviv_init(). */
static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
730 
/* Module metadata. */
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");
737