xref: /openbmc/linux/drivers/gpu/drm/msm/msm_drv.c (revision 828ff2ad)
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gpu.h"
#include "msm_kms.h"


/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	3
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
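
/*
 * Usage note (illustrative, not exhaustive): these knobs can be set at
 * module load time or on the kernel command line, e.g.
 *
 *	modprobe msm reglog=1 vram=32m
 *	msm.modeset=0 msm.dumpstate=1	(kernel command line, built-in case)
 *
 * "vram" is parsed with memparse() below, so the usual k/m/g suffixes
 * apply.
 */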

/*
 * Util/helpers:
 */

int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
{
	struct property *prop;
	const char *name;
	struct clk_bulk_data *local;
	int i = 0, ret, count;

	count = of_property_count_strings(dev->of_node, "clock-names");
	if (count < 1)
		return 0;

	/* devm_kcalloc() takes (n, size); size the array by the element
	 * type, not by a pointer to it:
	 */
	local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
		GFP_KERNEL);
	if (!local)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!local[i].id) {
			devm_kfree(dev, local);
			return -ENOMEM;
		}

		i++;
	}

	ret = devm_clk_bulk_get(dev, count, local);

	if (ret) {
		for (i = 0; i < count; i++)
			devm_kfree(dev, (void *) local[i].id);
		devm_kfree(dev, local);

		return ret;
	}

	*bulk = local;
	return count;
}

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
		const char *name)
{
	int i;
	char n[32];

	snprintf(n, sizeof(n), "%s_clk", name);

	for (i = 0; bulk && i < count; i++) {
		if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
			return bulk[i].clk;
	}

	return NULL;
}
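
/*
 * A minimal sketch of a caller, assuming a platform device whose DT node
 * carries a "clock-names" property (the "core" name is hypothetical):
 *
 *	struct clk_bulk_data *bulk;
 *	struct clk *core;
 *	int num;
 *
 *	num = msm_clk_bulk_get(&pdev->dev, &bulk);
 *	if (num <= 0)
 *		return num;
 *
 *	// matches either "core" or the legacy "core_clk" DT name
 *	core = msm_clk_bulk_get_clock(bulk, num, "core");
 */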

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}
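
/*
 * The fallback in DT terms (illustrative): a request for "iface" first
 * tries clock-names = "iface" and, failing that, the deprecated
 * clock-names = "iface_clk", warning so the DT can be modernized.
 */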

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
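
/*
 * Typical use (sketch; the resource and debug names are hypothetical):
 *
 *	void __iomem *base = msm_ioremap(pdev, "mdp_phys", "MDP");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Note the helper returns ERR_PTR() values, never NULL, so callers must
 * check with IS_ERR().
 */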

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		pr_err("IO:R %p %08x\n", addr, val);
	return val;
}

struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};

static void vblank_ctrl_worker(struct kthread_work *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}

	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
			&vbl_ctrl->work);

	return 0;
}
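
/*
 * Design note (editorial): enable_vblank/disable_vblank may be called from
 * atomic context, while the kms enable/disable hooks can need to sleep
 * (e.g. to touch clocks), so the request is only recorded here, with
 * GFP_ATOMIC under a spinlock, and replayed from the per-crtc kthread
 * worker above.
 */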

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_mdss *mdss = priv->mdss;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;
	int i;

	/* We must cancel and clean up any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	kthread_flush_work(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	/* clean up display commit/event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread) {
			kthread_flush_worker(&priv->disp_thread[i].worker);
			kthread_stop(priv->disp_thread[i].thread);
			priv->disp_thread[i].thread = NULL;
		}

		if (priv->event_thread[i].thread) {
			kthread_flush_worker(&priv->event_thread[i].worker);
			kthread_stop(priv->event_thread[i].thread);
			priv->event_thread[i].thread = NULL;
		}
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	drm_dev_unregister(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif
	drm_atomic_helper_shutdown(ddev);
	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	kfree(priv);

	return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}
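
/*
 * The match data comes from the dt_match[] table at the bottom of this
 * file, e.g. "qcom,mdp4" -> KMS_MDP4 and "qcom,sdm845-mdss" -> KMS_DPU.
 */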

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
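
/*
 * A sketch of the device-tree wiring the 'memory-region' case above
 * expects; node names, labels, and addresses are hypothetical:
 *
 *	reserved-memory {
 *		gpu_vram: vram@9d000000 {
 *			reg = <0x9d000000 0x1000000>;
 *			no-map;
 *		};
 *	};
 *
 *	&mdp {
 *		memory-region = <&gpu_vram>;
 *	};
 */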

static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	struct msm_mdss *mdss;
	int ret, i;
	struct sched_param param;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		dev_err(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put_drm_dev;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP5:
		ret = mdp5_mdss_init(ddev);
		break;
	case KMS_DPU:
		ret = dpu_mdss_init(ddev);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret)
		goto err_free_priv;

	mdss = priv->mdss;

	priv->wq = alloc_ordered_workqueue("msm", 0);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_destroy_mdss;

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_msm_uninit;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case KMS_MDP5:
		/* unlike MDP4/DPU, mdp5_kms_init() is expected to assign
		 * priv->kms itself, hence no assignment here:
		 */
		kms = mdp5_kms_init(ddev);
		break;
	case KMS_DPU:
		kms = dpu_kms_init(ddev);
		priv->kms = kms;
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	/*
	 * This priority was found during empirical testing to give the
	 * display threads appropriate realtime scheduling for processing
	 * display updates, while still interacting well with other real
	 * time and normal priority tasks.
	 */
	param.sched_priority = 16;
	for (i = 0; i < priv->num_crtcs; i++) {

		/* initialize display thread */
		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->disp_thread[i].worker);
		priv->disp_thread[i].dev = ddev;
		priv->disp_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->disp_thread[i].worker,
				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
		/* check for creation failure *before* touching the thread,
		 * since kthread_run() returns an ERR_PTR on failure:
		 */
		if (IS_ERR(priv->disp_thread[i].thread)) {
			dev_err(dev, "failed to create crtc_commit kthread\n");
			priv->disp_thread[i].thread = NULL;
		} else {
			ret = sched_setscheduler(priv->disp_thread[i].thread,
						 SCHED_FIFO, &param);
			if (ret)
				pr_warn("display thread priority update failed: %d\n",
						ret);
		}

		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->event_thread[i].worker);
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->event_thread[i].worker,
				"crtc_event:%d", priv->event_thread[i].crtc_id);
		/*
		 * The event thread should run at the same priority as the
		 * disp_thread because it handles frame_done events. A lower
		 * priority event thread combined with a higher priority
		 * disp_thread can push the frame_pending counters beyond 2,
		 * which leads to commit failure at the crtc commit level.
		 */
		if (IS_ERR(priv->event_thread[i].thread)) {
			dev_err(dev, "failed to create crtc_event kthread\n");
			priv->event_thread[i].thread = NULL;
		} else {
			ret = sched_setscheduler(priv->event_thread[i].thread,
						 SCHED_FIFO, &param);
			if (ret)
				pr_warn("display event thread priority update failed: %d\n",
						ret);
		}

		if ((!priv->disp_thread[i].thread) ||
				!priv->event_thread[i].thread) {
			/* clean up previously created threads if any */
			for ( ; i >= 0; i--) {
				if (priv->disp_thread[i].thread) {
					kthread_stop(
						priv->disp_thread[i].thread);
					priv->disp_thread[i].thread = NULL;
				}

				if (priv->event_thread[i].thread) {
					kthread_stop(
						priv->event_thread[i].thread);
					priv->event_thread[i].thread = NULL;
				}
			}
			goto err_msm_uninit;
		}
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			dev_err(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
err_destroy_mdss:
	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);
err_free_priv:
	kfree(priv);
err_put_drm_dev:
	drm_dev_put(ddev);
	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	msm_submitqueue_init(dev, ctx);

	file->driver_priv = ctx;

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	kfree(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	context_close(ctx);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%u", dev, pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%u", dev, pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
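
/*
 * Sketch of a userspace caller via libdrm's generic command helpers (the
 * size and MSM_BO_WC flag are illustrative):
 *
 *	struct drm_msm_gem_new req = {
 *		.size = 4096,
 *		.flags = MSM_BO_WC,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req, sizeof(req)) == 0)
 *		// req.handle now names the new GEM object
 */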

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_gem_object *obj, uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return -EINVAL;

	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->flags & ~MSM_INFO_FLAGS)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & MSM_INFO_IOVA) {
		uint64_t iova;

		ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
		if (!ret)
			args->offset = iova;
	} else {
		args->offset = msm_gem_mmap_offset(obj);
	}

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
		true);

	msm_submitqueue_put(queue);
	return ret;
}
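
/*
 * Matching userspace sketch (field values illustrative; queueid = 0 here
 * assumes the default queue set up at open time):
 *
 *	struct drm_msm_wait_fence wait = {
 *		.fence = submit_fence,
 *		.queueid = 0,
 *		.timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *	};
 *
 *	drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &wait, sizeof(wait));
 */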

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
	.compat_ioctl       = drm_compat_ioctl,
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = msm_gem_prime_res_obj,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;

	/* TODO: Use atomic helper suspend/resume */
	if (kms && kms->funcs && kms->funcs->pm_suspend)
		return kms->funcs->pm_suspend(dev);

	drm_kms_helper_poll_disable(ddev);

	priv->pm_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->pm_state)) {
		drm_kms_helper_poll_enable(ddev);
		return PTR_ERR(priv->pm_state);
	}

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;

	/* TODO: Use atomic helper suspend/resume */
	if (kms && kms->funcs && kms->funcs->pm_resume)
		return kms->funcs->pm_resume(dev);

	drm_atomic_helper_resume(ddev, priv->pm_state);
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: this duplicates code found in exynos, imx (and probably other
 * drivers), so there is likely room for a common helper.
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			dev_err(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		drm_of_component_match_add(master_dev, matchptr, compare_of,
					   intf);
		of_node_put(intf);
	}

	return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to populate child devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			dev_err(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);

		/* drop the reference taken by device_find_child() */
		put_device(mdp_dev);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add
 * them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}
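
/*
 * Sketch of the sort of DT node this hunts for (label, compatible version,
 * and addresses are hypothetical):
 *
 *	gpu: gpu@5000000 {
 *		compatible = "qcom,adreno-630.2", "qcom,adreno";
 *		reg = <0x5000000 0x40000>;
 *	};
 *
 * Any node matching one of the msm_gpu_match[] compatibles is added to the
 * component match list; the adreno driver binds to it separately.
 */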

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		return ret;

	/* On all devices that I am aware of, the IOMMUs can map any
	 * address the CPU can see, hence the all-ones DMA mask:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");