/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <drm/drm_of.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

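/*
 * Map a memory resource of the etnaviv platform device, optionally looked up
 * by name, and log the mapping when register logging is enabled.
 */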
void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}

void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}

u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}

/*
 * DRM operations:
 */

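/* Bring up every bound GPU core; cores that fail to initialise are dropped. */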
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

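/*
 * Per-open file context: allocate the etnaviv file private data and set up
 * one scheduler entity per available GPU pipe.
 */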
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			drm_sched_entity_init(&gpu->sched,
				&ctx->sched_entity[i],
				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				32, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;
}

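/*
 * Per-close teardown: clear any lastctx reference held by the GPUs and
 * destroy the per-pipe scheduler entities before freeing the context.
 */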
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);

			drm_sched_entity_fini(&gpu->sched,
					      &ctx->sched_entity[i]);
		}
	}

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_print(&gpu->mmu->mm, &p);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

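/*
 * debugfs dispatch helpers: show_unlocked calls a per-device show function,
 * show_each_gpu invokes a per-GPU show function for every present pipe.
 */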
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
		{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
		{"gem", show_unlocked, 0, etnaviv_gem_show},
		{"mm", show_unlocked, 0, etnaviv_mm_show},
		{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
		{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
			ARRAY_SIZE(etnaviv_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
		return ret;
	}

	return 0;
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

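/* Build a kernel timespec from the UAPI timeout embedded in the ioctl args. */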
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
	.compat_ioctl       = drm_compat_ioctl,
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose          = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = etnaviv_gem_prime_res_obj,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap     = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 2,
};

/*
 * Platform driver:
 */
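/*
 * Component master bind: create the DRM device, bind all GPU core
 * components, initialise them and register the device with DRM.
 */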
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

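/* Component match callbacks: match GPU cores by OF node or by device name. */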
static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct component_match *match = NULL;

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned int i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
	},
};

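/*
 * Module init: register the GPU core driver and the master platform driver,
 * then instantiate the virtual "etnaviv" platform device if the DT contains
 * at least one available GPU node.
 */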
static int __init etnaviv_init(void)
{
	struct device_node *np;
	int ret;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0) {
		platform_driver_unregister(&etnaviv_gpu_driver);
		return ret;
	}

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		platform_device_register_simple("etnaviv", -1, NULL, 0);
		of_node_put(np);
		break;
	}

	return 0;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");