// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>

#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

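/*
 * One-time hardware init (kms ->hw_init hook): read and validate the MDP
 * core version, then program the initial fetch/burst configuration, the
 * layermixer setup and disable the CSC/YUV paths.
 */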
static int mdp4_hw_init(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp4_kms->dev;
	uint32_t version, major, minor, dmap_cfg, vg_cfg;
	unsigned long clk;
	int ret = 0;

	pm_runtime_get_sync(dev->dev);

	mdp4_enable(mdp4_kms);
	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
	mdp4_disable(mdp4_kms);

	major = FIELD(version, MDP4_VERSION_MAJOR);
	minor = FIELD(version, MDP4_VERSION_MINOR);

	DBG("found MDP4 version v%d.%d", major, minor);

	if (major != 4) {
		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
				major, minor);
		ret = -ENXIO;
		goto out;
	}

	mdp4_kms->rev = minor;

	if (mdp4_kms->rev > 1) {
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
	}

	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

	/* max read pending cmd config, 3 pending requests: */
	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

	clk = clk_get_rate(mdp4_kms->clk);

	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
	} else {
		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
	}

	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

	if (mdp4_kms->rev >= 2)
		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

	/* disable CSC matrix / YUV by default: */
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

	if (mdp4_kms->rev > 1)
		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

	dev->mode_config.allow_fb_modifiers = true;

out:
	pm_runtime_put_sync(dev->dev);

	return ret;
}

static void mdp4_enable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_enable(mdp4_kms);
}

static void mdp4_disable_commit(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	mdp4_disable(mdp4_kms);
}

static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	/* see 119ecb7fd */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_get(crtc);
}

static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		mdp4_crtc_wait_for_commit_done(crtc);
}

static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	/* see 119ecb7fd */
	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
		drm_crtc_vblank_put(crtc);
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	/* if we had >1 encoder, we'd need something more clever: */
	switch (encoder->encoder_type) {
	case DRM_MODE_ENCODER_TMDS:
		return mdp4_dtv_round_pixclk(encoder, rate);
	case DRM_MODE_ENCODER_LVDS:
	case DRM_MODE_ENCODER_DSI:
	default:
		return rate;
	}
}

static void mdp4_destroy(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
	struct device *dev = mdp4_kms->dev->dev;
	struct msm_gem_address_space *aspace = kms->aspace;

	if (mdp4_kms->blank_cursor_iova)
		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
	drm_gem_object_put(mdp4_kms->blank_cursor_bo);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	if (mdp4_kms->rpm_enabled)
		pm_runtime_disable(dev);

	mdp_kms_destroy(&mdp4_kms->base);

	kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp4_hw_init,
		.irq_preinstall  = mdp4_irq_preinstall,
		.irq_postinstall = mdp4_irq_postinstall,
		.irq_uninstall   = mdp4_irq_uninstall,
		.irq             = mdp4_irq,
		.enable_vblank   = mdp4_enable_vblank,
		.disable_vblank  = mdp4_disable_vblank,
		.enable_commit   = mdp4_enable_commit,
		.disable_commit  = mdp4_disable_commit,
		.prepare_commit  = mdp4_prepare_commit,
		.flush_commit    = mdp4_flush_commit,
		.wait_flush      = mdp4_wait_flush,
		.complete_commit = mdp4_complete_commit,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp4_round_pixclk,
		.destroy         = mdp4_destroy,
	},
	.set_irqmask         = mdp4_set_irqmask,
};

int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_disable_unprepare(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_disable_unprepare(mdp4_kms->pclk);
	if (mdp4_kms->lut_clk)
		clk_disable_unprepare(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_disable_unprepare(mdp4_kms->axi_clk);

	return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
	DBG("");

	clk_prepare_enable(mdp4_kms->clk);
	if (mdp4_kms->pclk)
		clk_prepare_enable(mdp4_kms->pclk);
	if (mdp4_kms->lut_clk)
		clk_prepare_enable(mdp4_kms->lut_clk);
	if (mdp4_kms->axi_clk)
		clk_prepare_enable(mdp4_kms->axi_clk);

	return 0;
}

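/*
 * Construct the encoder (and connector, where applicable) for a single
 * interface type: LVDS panel via the LCDC encoder, HDMI via the DTV
 * encoder, or DSI.
 */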
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
				  int intf_type)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct device_node *panel_node;
	int dsi_id;
	int ret;

	switch (intf_type) {
	case DRM_MODE_ENCODER_LVDS:
		/*
		 * bail out early if there is no panel node (no need to
		 * initialize LCDC encoder and LVDS connector)
		 */
		panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
		if (!panel_node)
			return 0;

		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
			return PTR_ERR(encoder);
		}

		/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
		encoder->possible_crtcs = 1 << DMA_P;

		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
		if (IS_ERR(connector)) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
			return PTR_ERR(connector);
		}

		priv->encoders[priv->num_encoders++] = encoder;
		priv->connectors[priv->num_connectors++] = connector;

		break;
	case DRM_MODE_ENCODER_TMDS:
		encoder = mdp4_dtv_encoder_init(dev);
		if (IS_ERR(encoder)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
			return PTR_ERR(encoder);
		}

		/* DTV can be hooked to DMA_E: */
		encoder->possible_crtcs = 1 << 1;

		if (priv->hdmi) {
			/* Construct bridge/connector for HDMI: */
			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
			if (ret) {
				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
				return ret;
			}
		}

		priv->encoders[priv->num_encoders++] = encoder;

		break;
	case DRM_MODE_ENCODER_DSI:
		/* only DSI1 supported for now */
		dsi_id = 0;

		if (!priv->dsi[dsi_id])
			break;

		encoder = mdp4_dsi_encoder_init(dev);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			DRM_DEV_ERROR(dev->dev,
				"failed to construct DSI encoder: %d\n", ret);
			return ret;
		}

		/* TODO: Add DMA_S later? */
		encoder->possible_crtcs = 1 << DMA_P;
		priv->encoders[priv->num_encoders++] = encoder;

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
				ret);
			return ret;
		}

		break;
	default:
		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
		return -EINVAL;
	}

	return 0;
}

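/*
 * Construct the planes, CRTCs and interfaces: the two VG pipes become
 * overlay planes, the two RGB pipes become the primary planes for the
 * DMA_P and DMA_E CRTCs, and each supported interface (LVDS, DSI,
 * TMDS/HDMI) is then initialized on top of those.
 */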
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;
	static const enum mdp4_pipe rgb_planes[] = {
		RGB1, RGB2,
	};
	static const enum mdp4_pipe vg_planes[] = {
		VG1, VG2,
	};
	static const enum mdp4_dma mdp4_crtcs[] = {
		DMA_P, DMA_E,
	};
	static const char * const mdp4_crtc_names[] = {
		"DMA_P", "DMA_E",
	};
	static const int mdp4_intfs[] = {
		DRM_MODE_ENCODER_LVDS,
		DRM_MODE_ENCODER_DSI,
		DRM_MODE_ENCODER_TMDS,
	};

	/* construct non-private planes: */
	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
		plane = mdp4_plane_init(dev, vg_planes[i], false);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for VG%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
		plane = mdp4_plane_init(dev, rgb_planes[i], true);
		if (IS_ERR(plane)) {
			DRM_DEV_ERROR(dev->dev,
				"failed to construct plane for RGB%d\n", i + 1);
			ret = PTR_ERR(plane);
			goto fail;
		}

		crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
				mdp4_crtcs[i]);
		if (IS_ERR(crtc)) {
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
				mdp4_crtc_names[i]);
			ret = PTR_ERR(crtc);
			goto fail;
		}

		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * we currently set up two relatively fixed paths:
	 *
	 * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
	 *			or
	 * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
	 *
	 * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
	 */

	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
				i, ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

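/*
 * Construct the kms object: map the MMIO region, look up the irq, clocks
 * and (optional) vdd regulator, make sure the interfaces are disabled
 * before attaching the iommu, then set up the modeset objects and the
 * blank-cursor scanout buffer.
 */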
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct msm_gem_address_space *aspace;
	int irq, ret;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
		goto fail;
	}

	kms = &mdp4_kms->base.base;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	if (mdp4_kms->rev >= 2) {
		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp4_kms->lut_clk)) {
			DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
			ret = PTR_ERR(mdp4_kms->lut_clk);
			goto fail;
		}
	}

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);
	if (mdp4_kms->lut_clk)
		clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
	mdelay(16);

	if (config->iommu) {
		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
			config->iommu);

		aspace = msm_gem_address_space_create(mmu,
			"mdp4", 0x1000, 0x100000000 - 0x1000);

		if (IS_ERR(aspace)) {
			if (!IS_ERR(mmu))
				mmu->funcs->destroy(mmu);
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;
	} else {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);

	return &config;
}