xref: /openbmc/linux/drivers/gpu/drm/msm/msm_drv.c (revision d7a3d85e)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
#endif
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
};

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}
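
/* An illustrative sketch of how a sub-driver might use this (the field and
 * variable names are hypothetical, not taken from this file): the GPU or
 * KMS code registers its MMU once at init time and keeps the returned
 * index to identify its address space later on:
 *
 *	gpu->id = msm_register_mmu(drm_dev, gpu->mmu);
 *	if (gpu->id < 0)
 *		return gpu->id;
 */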

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_MSM_FBDEV
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
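
/* Example usage, assuming the options above are enabled in Kconfig: for a
 * built-in driver the parameters go on the kernel command line, e.g.
 *
 *	msm.vram=64m msm.reglog=1
 *
 * and for a modular build they can be given at load time:
 *
 *	modprobe msm vram=64m fbdev=0
 */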

/*
 * Util/helpers:
 */

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
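
/* A sketch of a typical call site; the "mdp_phys" resource name here is
 * only an assumption for illustration:
 *
 *	void __iomem *mmio = msm_ioremap(pdev, "mdp_phys", "MDP");
 *	if (IS_ERR(mmio))
 *		return PTR_ERR(mmio);
 */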

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
	return val;
}

/*
 * DRM operations:
 */

static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&dev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}

static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	static const struct of_device_id match_types[] = { {
		.compatible = "qcom,mdss_mdp",
		.data	= (void *)5,
	}, {
		/* end node */
	} };
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	match = of_match_node(match_types, dev->of_node);
	if (match)
		return (int)(unsigned long)match->data;
#endif
	return 4;
}

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long size = 0;
	int ret = 0;

#ifdef CONFIG_OF
	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for the fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	struct device_node *node;

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		/* drop the reference taken by of_parse_phandle(): */
		of_node_put(node);
		if (ret)
			return ret;
		/* resource limits are inclusive, so size is end - start + 1: */
		size = resource_size(&r);
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
	} else
#endif

	/* if we have no IOMMU, then we need to use the carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		DEFINE_DMA_ATTRS(attrs);
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
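
/* An illustrative device-tree sketch of the 'memory-region' lookup above;
 * the node names, addresses, and sizes are made up for illustration and the
 * exact binding details may differ:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		vram: vram@88000000 {
 *			reg = <0x88000000 0x1000000>;
 *		};
 *	};
 *
 *	mdp: mdp@5100000 {
 *		memory-region = <&vram>;
 *	};
 */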

static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);

	drm_mode_config_init(dev);

	platform_set_drvdata(pdev, dev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		return ret;

	ret = msm_init_vram(dev);
	if (ret)
		goto fail;

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * the imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	if (fbdev)
		priv->fbdev = msm_fbdev_init(dev);
#endif

	ret = msm_debugfs_late_init(dev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
#endif
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}

static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}

static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}

static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}

static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gpu", show_locked, 0, msm_gpu_show },
	{ "gem", show_locked, 0, msm_gem_show },
	{ "mm", show_locked, 0, msm_mm_show },
	{ "fb", show_locked, 0, msm_fb_show },
};

static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;
	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}

static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return 0;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
	if (!minor->dev->dev_private)
		return;
	msm_rd_debugfs_cleanup(minor);
	msm_perf_debugfs_cleanup(minor);
}
#endif

/*
 * Fences:
 */

int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		/* the timeout is treated as an absolute time, converted to
		 * jiffies and compared against the current jiffies count:
		 */
		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
		unsigned long start_jiffies = jiffies;
		unsigned long remaining_jiffies;

		if (time_after(start_jiffies, timeout_jiffies))
			remaining_jiffies = 0;
		else
			remaining_jiffies = timeout_jiffies - start_jiffies;

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

int msm_queue_fence_cb(struct drm_device *dev,
		struct msm_fence_cb *cb, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (fence > priv->completed_fence) {
		cb->fence = fence;
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}
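
/* A sketch of how a fence callback might be queued, assuming the
 * INIT_FENCE_CB() helper from msm_drv.h (the callback and variable names
 * are hypothetical).  The callback runs on priv->wq once msm_update_fence()
 * sees that the fence has been reached:
 *
 *	static void flip_done_cb(struct msm_fence_cb *cb)
 *	{
 *		// runs in workqueue context after the fence completes
 *	}
 *
 *	INIT_FENCE_CB(&my_cb, flip_done_cb);
 *	ret = msm_queue_fence_cb(dev, &my_cb, fence);
 */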

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have the 3d pipe.. eventually this would need to
	 * be more clever to dispatch to the appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	return msm_wait_fence_interruptable(dev, args->fence,
			&TS(args->timeout));
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl       = drm_compat_ioctl,
#endif
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_MODESET,
	.load               = msm_load,
	.unload             = msm_unload,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.set_busid          = drm_platform_set_busid,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = 1,
	.minor              = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Componentized driver support:
 */

#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
 * (and probably any other).. so there is probably some room for
 * shared helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int add_components(struct device *dev, struct component_match **matchptr,
		const char *name)
{
	struct device_node *np = dev->of_node;
	unsigned i;

	for (i = 0; ; i++) {
		struct device_node *node;

		node = of_parse_phandle(np, name, i);
		if (!node)
			break;

		component_match_add(dev, matchptr, compare_of, node);
	}

	return 0;
}
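
/* The device-tree side of this, sketched for illustration (the property
 * names come from msm_pdev_probe() below; the node name, address, and
 * phandle targets are assumptions):
 *
 *	mdp: mdp@5100000 {
 *		compatible = "qcom,mdss_mdp";
 *		connectors = <&hdmi>;
 *		gpus = <&gpu>;
 *	};
 */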
#else
static int compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
#endif

static int msm_drm_bind(struct device *dev)
{
	return drm_platform_init(&msm_driver, to_platform_device(dev));
}

static void msm_drm_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
#ifdef CONFIG_OF
	add_components(&pdev->dev, &match, "connectors");
	add_components(&pdev->dev, &match, "gpus");
#else
	/* For the non-DT case, it kinda sucks.  We don't actually have a way
	 * to know whether or not we are waiting for certain devices (or if
	 * they are simply not present).  But for non-DT we only need to
	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
	 */
	static const char *devnames[] = {
			"hdmi_msm.0", "kgsl-3d0.0",
	};
	int i;

	DBG("Adding components..");

	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
		struct device *dev;

		dev = bus_find_device_by_name(&platform_bus_type,
				NULL, devnames[i]);
		if (!dev) {
			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
			return -EPROBE_DEFER;
		}

		component_match_add(&pdev->dev, &match, compare_dev, dev);
	}
#endif

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp" },      /* mdp4 */
	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
	.id_table   = msm_id,
};

static int __init msm_drm_register(void)
{
	DBG("init");
	msm_dsi_register();
	msm_edp_register();
	hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");