// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	9
#define MSM_VERSION_PATCHLEVEL	0
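
/*
 * Illustrative sketch (not part of the driver): userspace typically gates
 * feature use on the version numbers above via the core DRM version ioctl,
 * e.g. with libdrm (fd setup and error handling omitted):
 *
 *	drmVersionPtr v = drmGetVersion(fd);
 *	bool has_syncobj = v && (v->version_major > 1 ||
 *			(v->version_major == 1 && v->version_minor >= 6));
 *	drmFreeVersion(v);
 */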

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
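
/*
 * For reference: as parameters of the "msm" module, the knobs above can be
 * set on the kernel command line or at insmod time, e.g. (values chosen
 * purely for illustration; fbdev only exists with fbdev emulation enabled):
 *
 *	msm.vram=32m msm.fbdev=0 msm.dumpstate=1
 *
 * or, when built as a module:
 *
 *	insmod msm.ko vram=32m
 */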

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}

struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	/*
	 * Shutdown the hw if we're far enough along that things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and clean up any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	msm_disp_snapshot_destroy(ddev);

	drm_mode_config_cleanup(ddev);

	for (i = 0; i < priv->num_bridges; i++)
		drm_bridge_remove(priv->bridges[i]);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}

#include <linux/of_address.h>

struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	domain = iommu_domain_alloc(iommu_dev->bus);
	if (!domain) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	mmu = msm_iommu_new(iommu_dev, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);
		return ERR_CAST(mmu);
	}

	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace))
		mmu->funcs->destroy(mmu);

	return aspace;
}
bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
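
/*
 * For reference, the memory-region case above corresponds to a standard
 * reserved-memory setup in the device tree, along these lines (node names
 * and addresses are hypothetical):
 *
 *	reserved-memory {
 *		splash_region: splash@9d400000 {
 *			reg = <0x0 0x9d400000 0x0 0x2400000>;
 *		};
 *	};
 *
 *	&mdp {
 *		memory-region = <&splash_region>;
 *	};
 */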

static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	struct msm_kms *kms;
	int ret, i;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	INIT_LIST_HEAD(&priv->inactive_willneed);
	INIT_LIST_HEAD(&priv->inactive_dontneed);
	INIT_LIST_HEAD(&priv->inactive_unpinned);
	mutex_init(&priv->mm_lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->mm_lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);
	if (ret)
		return ret;

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		return ret;

	dma_set_max_seg_size(dev, UINT_MAX);

	msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = priv->kms_init(ddev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to load kms\n");
			priv->kms = NULL;
			goto err_msm_uninit;
		}
		kms = priv->kms;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		kms->dev = ddev;
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].worker = kthread_create_worker(0,
			"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].worker)) {
			ret = PTR_ERR(priv->event_thread[i].worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			goto err_msm_uninit;
		}

		sched_set_fifo(priv->event_thread[i].worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = msm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	if (kms) {
		ret = msm_disp_snapshot_init(ddev);
		if (ret)
			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
	}
	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;
	drm_dbg_vbl(dev, "crtc=%u", pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;
	drm_dbg_vbl(dev, "crtc=%u", pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}
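
/*
 * Illustrative userspace usage of the ioctl above, a sketch assuming the
 * <drm/msm_drm.h> uapi header and libdrm's drmIoctl() (error handling
 * omitted):
 *
 *	struct drm_msm_gem_new req = {
 *		.size = size,
 *		.flags = MSM_BO_WC,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req);
 *
 * On success, req.handle names the new GEM object.
 */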

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}
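
/*
 * Illustrative sketch of naming a buffer from userspace via the ioctl
 * above (handle and name are hypothetical, error handling omitted):
 *
 *	struct drm_msm_gem_info req = {
 *		.handle = bo_handle,
 *		.info = MSM_INFO_SET_NAME,
 *		.value = (uintptr_t)"ring0",
 *		.len = strlen("ring0"),
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
 */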

static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		return ret;
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	mutex_unlock(&queue->lock);

	if (!fence)
		return 0;

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout));

	msm_submitqueue_put(queue);

	return ret;
}
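
/*
 * Illustrative sketch of waiting on a submit fence from userspace. The
 * timeout appears to be interpreted as an absolute CLOCK_MONOTONIC
 * deadline (see timeout_to_jiffies()); values below are hypothetical and
 * error handling is omitted:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 *	struct drm_msm_wait_fence req = {
 *		.fence = fence_id,
 *		.queueid = queue_id,
 *		.timeout = { .tv_sec = ts.tv_sec + 1, .tv_nsec = ts.tv_nsec },
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &req);
 */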

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct drm_file *file = f->private_data;
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = msm_fop_show_fdinfo,
};

static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

int msm_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

static const struct dev_pm_ops msm_pm_ops = {
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

/*
 * We don't know what the best binding is to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
	int (*kms_init)(struct drm_device *dev))
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

/*
 * Platform driver:
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

void msm_drv_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_atomic_helper_shutdown(drm);
}

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.shutdown   = msm_drv_shutdown,
	.driver     = {
		.name   = "msm",
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");