1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2014, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2013 Red Hat
5  * Author: Rob Clark <robdclark@gmail.com>
6  */
7 
8 #include <linux/of_irq.h>
9 
10 #include "msm_drv.h"
11 #include "msm_gem.h"
12 #include "msm_mmu.h"
13 #include "mdp5_kms.h"
14 
/* IOMMU context-bank port name used when attaching/detaching the MDP's
 * address space (see mdp5_kms_init() / mdp5_kms_destroy()).
 */
static const char *iommu_ports[] = {
		"mdp_0",
};
18 
/*
 * One-time hardware init, called through msm_kms_funcs::hw_init.
 * Clears the interface mux and resets the CTL blocks; holds a
 * runtime-PM reference across the register accesses.
 */
static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc.  I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	/* Disconnect all interfaces; modeset reprograms the mux later. */
	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}
61 
62 /* Global/shared object state funcs */
63 
64 /*
65  * This is a helper that returns the private state currently in operation.
66  * Note that this would return the "old_state" if called in the atomic check
67  * path, and the "new_state" after the atomic swap has been done.
68  */
69 struct mdp5_global_state *
70 mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
71 {
72 	return to_mdp5_global_state(mdp5_kms->glob_state.state);
73 }
74 
75 /*
76  * This acquires the modeset lock set aside for global state, creates
77  * a new duplicated private object state.
78  */
79 struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
80 {
81 	struct msm_drm_private *priv = s->dev->dev_private;
82 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
83 	struct drm_private_state *priv_state;
84 	int ret;
85 
86 	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
87 	if (ret)
88 		return ERR_PTR(ret);
89 
90 	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
91 	if (IS_ERR(priv_state))
92 		return ERR_CAST(priv_state);
93 
94 	return to_mdp5_global_state(priv_state);
95 }
96 
97 static struct drm_private_state *
98 mdp5_global_duplicate_state(struct drm_private_obj *obj)
99 {
100 	struct mdp5_global_state *state;
101 
102 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
103 	if (!state)
104 		return NULL;
105 
106 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
107 
108 	return &state->base;
109 }
110 
static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	/* Nothing to release beyond the state struct itself. */
	kfree(to_mdp5_global_state(state));
}
118 
/* vtable hooking MDP5 global state into the atomic private-obj machinery */
static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};
123 
124 static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
125 {
126 	struct mdp5_global_state *state;
127 
128 	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
129 
130 	state = kzalloc(sizeof(*state), GFP_KERNEL);
131 	if (!state)
132 		return -ENOMEM;
133 
134 	state->mdp5_kms = mdp5_kms;
135 
136 	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
137 				    &state->base,
138 				    &mdp5_global_state_funcs);
139 	return 0;
140 }
141 
142 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
143 {
144 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
145 	struct device *dev = &mdp5_kms->pdev->dev;
146 	struct mdp5_global_state *global_state;
147 
148 	global_state = mdp5_get_existing_global_state(mdp5_kms);
149 
150 	pm_runtime_get_sync(dev);
151 
152 	if (mdp5_kms->smp)
153 		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
154 }
155 
156 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
157 {
158 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
159 	struct device *dev = &mdp5_kms->pdev->dev;
160 	struct mdp5_global_state *global_state;
161 
162 	drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
163 
164 	global_state = mdp5_get_existing_global_state(mdp5_kms);
165 
166 	if (mdp5_kms->smp)
167 		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
168 
169 	pm_runtime_put_sync(dev);
170 }
171 
/* kms hook: block until the given CRTC's pending flush has completed */
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
						struct drm_crtc *crtc)
{
	mdp5_crtc_wait_for_commit_done(crtc);
}
177 
/* kms hook: pass the requested pixel clock rate through unmodified
 * (no rounding constraints are modeled here).
 */
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
183 
184 static int mdp5_set_split_display(struct msm_kms *kms,
185 		struct drm_encoder *encoder,
186 		struct drm_encoder *slave_encoder,
187 		bool is_cmd_mode)
188 {
189 	if (is_cmd_mode)
190 		return mdp5_cmd_encoder_set_split_display(encoder,
191 							slave_encoder);
192 	else
193 		return mdp5_vid_encoder_set_split_display(encoder,
194 							  slave_encoder);
195 }
196 
/* kms hook: switch an encoder's interface between command and video mode */
static void mdp5_set_encoder_mode(struct msm_kms *kms,
				  struct drm_encoder *encoder,
				  bool cmd_mode)
{
	mdp5_encoder_set_intf_mode(encoder, cmd_mode);
}
203 
204 static void mdp5_kms_destroy(struct msm_kms *kms)
205 {
206 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
207 	struct msm_gem_address_space *aspace = kms->aspace;
208 	int i;
209 
210 	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
211 		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
212 
213 	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
214 		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
215 
216 	if (aspace) {
217 		aspace->mmu->funcs->detach(aspace->mmu,
218 				iommu_ports, ARRAY_SIZE(iommu_ports));
219 		msm_gem_address_space_put(aspace);
220 	}
221 }
222 
223 #ifdef CONFIG_DEBUG_FS
224 static int smp_show(struct seq_file *m, void *arg)
225 {
226 	struct drm_info_node *node = (struct drm_info_node *) m->private;
227 	struct drm_device *dev = node->minor->dev;
228 	struct msm_drm_private *priv = dev->dev_private;
229 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
230 	struct drm_printer p = drm_seq_file_printer(m);
231 
232 	if (!mdp5_kms->smp) {
233 		drm_printf(&p, "no SMP pool\n");
234 		return 0;
235 	}
236 
237 	mdp5_smp_dump(mdp5_kms->smp, &p);
238 
239 	return 0;
240 }
241 
242 static struct drm_info_list mdp5_debugfs_list[] = {
243 		{"smp", smp_show },
244 };
245 
246 static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
247 {
248 	struct drm_device *dev = minor->dev;
249 	int ret;
250 
251 	ret = drm_debugfs_create_files(mdp5_debugfs_list,
252 			ARRAY_SIZE(mdp5_debugfs_list),
253 			minor->debugfs_root, minor);
254 
255 	if (ret) {
256 		DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
257 		return ret;
258 	}
259 
260 	return 0;
261 }
262 #endif
263 
/* MDP5's implementation of the (mdp_)kms function table; the .base
 * members are the generic msm_kms hooks, .set_irqmask is MDP-specific.
 */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.prepare_commit  = mdp5_prepare_commit,
		.complete_commit = mdp5_complete_commit,
		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.set_split_display = mdp5_set_split_display,
		.set_encoder_mode = mdp5_set_encoder_mode,
		.destroy         = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init    = mdp5_kms_debugfs_init,
#endif
	},
	.set_irqmask         = mdp5_set_irqmask,
};
287 
/*
 * Drop the enable refcount and gate the MDP5 clocks.  Called from the
 * runtime-PM suspend path (mdp5_runtime_suspend()).
 */
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	/* Going negative indicates an enable/disable imbalance in a caller */
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	/* lut_clk is optional and may be absent on some SoCs */
	if (mdp5_kms->lut_clk)
		clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
303 
/*
 * Bump the enable refcount and ungate the MDP5 clocks.  Called from the
 * runtime-PM resume path (mdp5_runtime_resume()).
 */
int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	/* lut_clk is optional and may be absent on some SoCs */
	if (mdp5_kms->lut_clk)
		clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}
318 
319 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
320 					     struct mdp5_interface *intf,
321 					     struct mdp5_ctl *ctl)
322 {
323 	struct drm_device *dev = mdp5_kms->dev;
324 	struct msm_drm_private *priv = dev->dev_private;
325 	struct drm_encoder *encoder;
326 
327 	encoder = mdp5_encoder_init(dev, intf, ctl);
328 	if (IS_ERR(encoder)) {
329 		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
330 		return encoder;
331 	}
332 
333 	priv->encoders[priv->num_encoders++] = encoder;
334 
335 	return encoder;
336 }
337 
338 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
339 {
340 	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
341 	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
342 	int id = 0, i;
343 
344 	for (i = 0; i < intf_cnt; i++) {
345 		if (intfs[i] == INTF_DSI) {
346 			if (intf_num == i)
347 				return id;
348 
349 			id++;
350 		}
351 	}
352 
353 	return -EINVAL;
354 }
355 
/*
 * Set up the encoder + connector for one hardware interface.  For each
 * supported type the pattern is: skip if the sub-device (edp/hdmi/dsi)
 * was not probed, grab a CTL for the interface, build the encoder, then
 * hand it to the sub-device's modeset_init.  Returns 0 or -errno.
 */
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		/* not an error: the eDP sub-device simply isn't present */
		if (!priv->edp)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_edp_modeset_init(priv->edp, dev, encoder);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		/* DSI interfaces are indexed by their position among the
		 * DSI entries of the connect table, not by intf->num.
		 */
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
442 
443 static int modeset_init(struct mdp5_kms *mdp5_kms)
444 {
445 	struct drm_device *dev = mdp5_kms->dev;
446 	struct msm_drm_private *priv = dev->dev_private;
447 	const struct mdp5_cfg_hw *hw_cfg;
448 	unsigned int num_crtcs;
449 	int i, ret, pi = 0, ci = 0;
450 	struct drm_plane *primary[MAX_BASES] = { NULL };
451 	struct drm_plane *cursor[MAX_BASES] = { NULL };
452 
453 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
454 
455 	/*
456 	 * Construct encoders and modeset initialize connector devices
457 	 * for each external display interface.
458 	 */
459 	for (i = 0; i < mdp5_kms->num_intfs; i++) {
460 		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
461 		if (ret)
462 			goto fail;
463 	}
464 
465 	/*
466 	 * We should ideally have less number of encoders (set up by parsing
467 	 * the MDP5 interfaces) than the number of layer mixers present in HW,
468 	 * but let's be safe here anyway
469 	 */
470 	num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
471 
472 	/*
473 	 * Construct planes equaling the number of hw pipes, and CRTCs for the
474 	 * N encoders set up by the driver. The first N planes become primary
475 	 * planes for the CRTCs, with the remainder as overlay planes:
476 	 */
477 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
478 		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
479 		struct drm_plane *plane;
480 		enum drm_plane_type type;
481 
482 		if (i < num_crtcs)
483 			type = DRM_PLANE_TYPE_PRIMARY;
484 		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
485 			type = DRM_PLANE_TYPE_CURSOR;
486 		else
487 			type = DRM_PLANE_TYPE_OVERLAY;
488 
489 		plane = mdp5_plane_init(dev, type);
490 		if (IS_ERR(plane)) {
491 			ret = PTR_ERR(plane);
492 			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
493 			goto fail;
494 		}
495 		priv->planes[priv->num_planes++] = plane;
496 
497 		if (type == DRM_PLANE_TYPE_PRIMARY)
498 			primary[pi++] = plane;
499 		if (type == DRM_PLANE_TYPE_CURSOR)
500 			cursor[ci++] = plane;
501 	}
502 
503 	for (i = 0; i < num_crtcs; i++) {
504 		struct drm_crtc *crtc;
505 
506 		crtc  = mdp5_crtc_init(dev, primary[i], cursor[i], i);
507 		if (IS_ERR(crtc)) {
508 			ret = PTR_ERR(crtc);
509 			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
510 			goto fail;
511 		}
512 		priv->crtcs[priv->num_crtcs++] = crtc;
513 	}
514 
515 	/*
516 	 * Now that we know the number of crtcs we've created, set the possible
517 	 * crtcs for the encoders
518 	 */
519 	for (i = 0; i < priv->num_encoders; i++) {
520 		struct drm_encoder *encoder = priv->encoders[i];
521 
522 		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
523 	}
524 
525 	return 0;
526 
527 fail:
528 	return ret;
529 }
530 
531 static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
532 				 u32 *major, u32 *minor)
533 {
534 	struct device *dev = &mdp5_kms->pdev->dev;
535 	u32 version;
536 
537 	pm_runtime_get_sync(dev);
538 	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
539 	pm_runtime_put_sync(dev);
540 
541 	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
542 	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
543 
544 	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
545 }
546 
547 static int get_clk(struct platform_device *pdev, struct clk **clkp,
548 		const char *name, bool mandatory)
549 {
550 	struct device *dev = &pdev->dev;
551 	struct clk *clk = msm_clk_get(pdev, name);
552 	if (IS_ERR(clk) && mandatory) {
553 		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
554 		return PTR_ERR(clk);
555 	}
556 	if (IS_ERR(clk))
557 		DBG("skipping %s", name);
558 	else
559 		*clkp = clk;
560 
561 	return 0;
562 }
563 
/* Find the encoder currently attached to @crtc, or NULL if none. */
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}
575 
576 static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
577 				bool in_vblank_irq, int *vpos, int *hpos,
578 				ktime_t *stime, ktime_t *etime,
579 				const struct drm_display_mode *mode)
580 {
581 	struct msm_drm_private *priv = dev->dev_private;
582 	struct drm_crtc *crtc;
583 	struct drm_encoder *encoder;
584 	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
585 
586 	crtc = priv->crtcs[pipe];
587 	if (!crtc) {
588 		DRM_ERROR("Invalid crtc %d\n", pipe);
589 		return false;
590 	}
591 
592 	encoder = get_encoder_from_crtc(crtc);
593 	if (!encoder) {
594 		DRM_ERROR("no encoder found for crtc %d\n", pipe);
595 		return false;
596 	}
597 
598 	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
599 	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
600 
601 	/*
602 	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
603 	 * the end of VFP. Translate the porch values relative to the line
604 	 * counter positions.
605 	 */
606 
607 	vactive_start = vsw + vbp + 1;
608 
609 	vactive_end = vactive_start + mode->crtc_vdisplay;
610 
611 	/* last scan line before VSYNC */
612 	vfp_end = mode->crtc_vtotal;
613 
614 	if (stime)
615 		*stime = ktime_get();
616 
617 	line = mdp5_encoder_get_linecount(encoder);
618 
619 	if (line < vactive_start) {
620 		line -= vactive_start;
621 	} else if (line > vactive_end) {
622 		line = line - vfp_end - vactive_start;
623 	} else {
624 		line -= vactive_start;
625 	}
626 
627 	*vpos = line;
628 	*hpos = 0;
629 
630 	if (etime)
631 		*etime = ktime_get();
632 
633 	return true;
634 }
635 
636 static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
637 {
638 	struct msm_drm_private *priv = dev->dev_private;
639 	struct drm_crtc *crtc;
640 	struct drm_encoder *encoder;
641 
642 	if (pipe >= priv->num_crtcs)
643 		return 0;
644 
645 	crtc = priv->crtcs[pipe];
646 	if (!crtc)
647 		return 0;
648 
649 	encoder = get_encoder_from_crtc(crtc);
650 	if (!encoder)
651 		return 0;
652 
653 	return mdp5_encoder_get_framecount(encoder);
654 }
655 
656 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
657 {
658 	struct msm_drm_private *priv = dev->dev_private;
659 	struct platform_device *pdev;
660 	struct mdp5_kms *mdp5_kms;
661 	struct mdp5_cfg *config;
662 	struct msm_kms *kms;
663 	struct msm_gem_address_space *aspace;
664 	int irq, i, ret;
665 
666 	/* priv->kms would have been populated by the MDP5 driver */
667 	kms = priv->kms;
668 	if (!kms)
669 		return NULL;
670 
671 	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
672 
673 	mdp_kms_init(&mdp5_kms->base, &kms_funcs);
674 
675 	pdev = mdp5_kms->pdev;
676 
677 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
678 	if (irq < 0) {
679 		ret = irq;
680 		DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
681 		goto fail;
682 	}
683 
684 	kms->irq = irq;
685 
686 	config = mdp5_cfg_get_config(mdp5_kms->cfg);
687 
688 	/* make sure things are off before attaching iommu (bootloader could
689 	 * have left things on, in which case we'll start getting faults if
690 	 * we don't disable):
691 	 */
692 	pm_runtime_get_sync(&pdev->dev);
693 	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
694 		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
695 		    !config->hw->intf.base[i])
696 			continue;
697 		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
698 
699 		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
700 	}
701 	mdelay(16);
702 
703 	if (config->platform.iommu) {
704 		aspace = msm_gem_address_space_create(&pdev->dev,
705 				config->platform.iommu, "mdp5");
706 		if (IS_ERR(aspace)) {
707 			ret = PTR_ERR(aspace);
708 			goto fail;
709 		}
710 
711 		kms->aspace = aspace;
712 
713 		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
714 				ARRAY_SIZE(iommu_ports));
715 		if (ret) {
716 			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
717 				ret);
718 			goto fail;
719 		}
720 	} else {
721 		DRM_DEV_INFO(&pdev->dev,
722 			 "no iommu, fallback to phys contig buffers for scanout\n");
723 		aspace = NULL;
724 	}
725 
726 	pm_runtime_put_sync(&pdev->dev);
727 
728 	ret = modeset_init(mdp5_kms);
729 	if (ret) {
730 		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
731 		goto fail;
732 	}
733 
734 	dev->mode_config.min_width = 0;
735 	dev->mode_config.min_height = 0;
736 	dev->mode_config.max_width = 0xffff;
737 	dev->mode_config.max_height = 0xffff;
738 
739 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
740 	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
741 	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
742 	dev->max_vblank_count = 0xffffffff;
743 	dev->vblank_disable_immediate = true;
744 
745 	return kms;
746 fail:
747 	if (kms)
748 		mdp5_kms_destroy(kms);
749 	return ERR_PTR(ret);
750 }
751 
/*
 * Tear down everything mdp5_init() set up.  Also runs on the
 * mdp5_init() error path, so each step tolerates the corresponding
 * member being NULL / zero.
 */
static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	/* interfaces are plain kzalloc'd structs; num_intfs is 0 if none */
	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	/* NOTE(review): these run unconditionally, even when mdp5_init()
	 * failed inside mdp5_global_obj_init() before the private obj was
	 * initialized — verify the fini calls are safe on a
	 * zero-initialized object in that early-error case.
	 */
	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
773 
774 static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
775 		const enum mdp5_pipe *pipes, const uint32_t *offsets,
776 		uint32_t caps)
777 {
778 	struct drm_device *dev = mdp5_kms->dev;
779 	int i, ret;
780 
781 	for (i = 0; i < cnt; i++) {
782 		struct mdp5_hw_pipe *hwpipe;
783 
784 		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
785 		if (IS_ERR(hwpipe)) {
786 			ret = PTR_ERR(hwpipe);
787 			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
788 					pipe2name(pipes[i]), ret);
789 			return ret;
790 		}
791 		hwpipe->idx = mdp5_kms->num_hwpipes;
792 		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
793 	}
794 
795 	return 0;
796 }
797 
798 static int hwpipe_init(struct mdp5_kms *mdp5_kms)
799 {
800 	static const enum mdp5_pipe rgb_planes[] = {
801 			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
802 	};
803 	static const enum mdp5_pipe vig_planes[] = {
804 			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
805 	};
806 	static const enum mdp5_pipe dma_planes[] = {
807 			SSPP_DMA0, SSPP_DMA1,
808 	};
809 	static const enum mdp5_pipe cursor_planes[] = {
810 			SSPP_CURSOR0, SSPP_CURSOR1,
811 	};
812 	const struct mdp5_cfg_hw *hw_cfg;
813 	int ret;
814 
815 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
816 
817 	/* Construct RGB pipes: */
818 	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
819 			hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
820 	if (ret)
821 		return ret;
822 
823 	/* Construct video (VIG) pipes: */
824 	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
825 			hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
826 	if (ret)
827 		return ret;
828 
829 	/* Construct DMA pipes: */
830 	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
831 			hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
832 	if (ret)
833 		return ret;
834 
835 	/* Construct cursor pipes: */
836 	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
837 			cursor_planes, hw_cfg->pipe_cursor.base,
838 			hw_cfg->pipe_cursor.caps);
839 	if (ret)
840 		return ret;
841 
842 	return 0;
843 }
844 
845 static int hwmixer_init(struct mdp5_kms *mdp5_kms)
846 {
847 	struct drm_device *dev = mdp5_kms->dev;
848 	const struct mdp5_cfg_hw *hw_cfg;
849 	int i, ret;
850 
851 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
852 
853 	for (i = 0; i < hw_cfg->lm.count; i++) {
854 		struct mdp5_hw_mixer *mixer;
855 
856 		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
857 		if (IS_ERR(mixer)) {
858 			ret = PTR_ERR(mixer);
859 			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
860 				i, ret);
861 			return ret;
862 		}
863 
864 		mixer->idx = mdp5_kms->num_hwmixers;
865 		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
866 	}
867 
868 	return 0;
869 }
870 
871 static int interface_init(struct mdp5_kms *mdp5_kms)
872 {
873 	struct drm_device *dev = mdp5_kms->dev;
874 	const struct mdp5_cfg_hw *hw_cfg;
875 	const enum mdp5_intf_type *intf_types;
876 	int i;
877 
878 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
879 	intf_types = hw_cfg->intf.connect;
880 
881 	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
882 		struct mdp5_interface *intf;
883 
884 		if (intf_types[i] == INTF_DISABLED)
885 			continue;
886 
887 		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
888 		if (!intf) {
889 			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
890 			return -ENOMEM;
891 		}
892 
893 		intf->num = i;
894 		intf->type = intf_types[i];
895 		intf->mode = MDP5_INTF_MODE_NONE;
896 		intf->idx = mdp5_kms->num_intfs;
897 		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
898 	}
899 
900 	return 0;
901 }
902 
903 static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
904 {
905 	struct msm_drm_private *priv = dev->dev_private;
906 	struct mdp5_kms *mdp5_kms;
907 	struct mdp5_cfg *config;
908 	u32 major, minor;
909 	int ret;
910 
911 	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
912 	if (!mdp5_kms) {
913 		ret = -ENOMEM;
914 		goto fail;
915 	}
916 
917 	platform_set_drvdata(pdev, mdp5_kms);
918 
919 	spin_lock_init(&mdp5_kms->resource_lock);
920 
921 	mdp5_kms->dev = dev;
922 	mdp5_kms->pdev = pdev;
923 
924 	ret = mdp5_global_obj_init(mdp5_kms);
925 	if (ret)
926 		goto fail;
927 
928 	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
929 	if (IS_ERR(mdp5_kms->mmio)) {
930 		ret = PTR_ERR(mdp5_kms->mmio);
931 		goto fail;
932 	}
933 
934 	/* mandatory clocks: */
935 	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
936 	if (ret)
937 		goto fail;
938 	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
939 	if (ret)
940 		goto fail;
941 	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
942 	if (ret)
943 		goto fail;
944 	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
945 	if (ret)
946 		goto fail;
947 
948 	/* optional clocks: */
949 	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
950 
951 	/* we need to set a default rate before enabling.  Set a safe
952 	 * rate first, then figure out hw revision, and then set a
953 	 * more optimal rate:
954 	 */
955 	clk_set_rate(mdp5_kms->core_clk, 200000000);
956 
957 	pm_runtime_enable(&pdev->dev);
958 	mdp5_kms->rpm_enabled = true;
959 
960 	read_mdp_hw_revision(mdp5_kms, &major, &minor);
961 
962 	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
963 	if (IS_ERR(mdp5_kms->cfg)) {
964 		ret = PTR_ERR(mdp5_kms->cfg);
965 		mdp5_kms->cfg = NULL;
966 		goto fail;
967 	}
968 
969 	config = mdp5_cfg_get_config(mdp5_kms->cfg);
970 	mdp5_kms->caps = config->hw->mdp.caps;
971 
972 	/* TODO: compute core clock rate at runtime */
973 	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
974 
975 	/*
976 	 * Some chipsets have a Shared Memory Pool (SMP), while others
977 	 * have dedicated latency buffering per source pipe instead;
978 	 * this section initializes the SMP:
979 	 */
980 	if (mdp5_kms->caps & MDP_CAP_SMP) {
981 		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
982 		if (IS_ERR(mdp5_kms->smp)) {
983 			ret = PTR_ERR(mdp5_kms->smp);
984 			mdp5_kms->smp = NULL;
985 			goto fail;
986 		}
987 	}
988 
989 	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
990 	if (IS_ERR(mdp5_kms->ctlm)) {
991 		ret = PTR_ERR(mdp5_kms->ctlm);
992 		mdp5_kms->ctlm = NULL;
993 		goto fail;
994 	}
995 
996 	ret = hwpipe_init(mdp5_kms);
997 	if (ret)
998 		goto fail;
999 
1000 	ret = hwmixer_init(mdp5_kms);
1001 	if (ret)
1002 		goto fail;
1003 
1004 	ret = interface_init(mdp5_kms);
1005 	if (ret)
1006 		goto fail;
1007 
1008 	/* set uninit-ed kms */
1009 	priv->kms = &mdp5_kms->base.base;
1010 
1011 	return 0;
1012 fail:
1013 	mdp5_destroy(pdev);
1014 	return ret;
1015 }
1016 
/* component bind: run first-stage init against the master's drm_device */
static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);

	DBG("");

	return mdp5_init(pdev, ddev);
}
1026 
/* component unbind: undo mdp5_bind()/mdp5_init() */
static void mdp5_unbind(struct device *dev, struct device *master,
			void *data)
{
	struct platform_device *pdev = to_platform_device(dev);

	mdp5_destroy(pdev);
}
1034 
/* component framework hooks; registered from mdp5_dev_probe() */
static const struct component_ops mdp5_ops = {
	.bind   = mdp5_bind,
	.unbind = mdp5_unbind,
};
1039 
/* platform probe: just register with the component framework; the real
 * setup happens in mdp5_bind() once the master drm device exists.
 */
static int mdp5_dev_probe(struct platform_device *pdev)
{
	DBG("");
	return component_add(&pdev->dev, &mdp5_ops);
}
1045 
/* platform remove: deregister from the component framework */
static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_del(&pdev->dev, &mdp5_ops);
	return 0;
}
1052 
1053 static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
1054 {
1055 	struct platform_device *pdev = to_platform_device(dev);
1056 	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
1057 
1058 	DBG("");
1059 
1060 	return mdp5_disable(mdp5_kms);
1061 }
1062 
1063 static __maybe_unused int mdp5_runtime_resume(struct device *dev)
1064 {
1065 	struct platform_device *pdev = to_platform_device(dev);
1066 	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
1067 
1068 	DBG("");
1069 
1070 	return mdp5_enable(mdp5_kms);
1071 }
1072 
/* runtime PM only; system sleep is handled elsewhere in the msm driver */
static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
};
1076 
/* devicetree compatibles this driver binds against */
static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);
1084 
/* platform driver glue; registered from msm_mdp_register() */
static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};
1094 
/* called from the msm driver's module init to register the MDP5 driver */
void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}
1100 
/* called from the msm driver's module exit to unregister the MDP5 driver */
void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}
1106