xref: /openbmc/linux/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c (revision 0b003749)
1 /*
2  * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
20 
21 #include <drm/drm_crtc.h>
22 #include <linux/debugfs.h>
23 #include <linux/of_irq.h>
24 #include <linux/dma-buf.h>
25 
26 #include "msm_drv.h"
27 #include "msm_mmu.h"
28 #include "msm_gem.h"
29 
30 #include "dpu_kms.h"
31 #include "dpu_core_irq.h"
32 #include "dpu_formats.h"
33 #include "dpu_hw_vbif.h"
34 #include "dpu_vbif.h"
35 #include "dpu_encoder.h"
36 #include "dpu_plane.h"
37 #include "dpu_crtc.h"
38 
39 #define CREATE_TRACE_POINTS
40 #include "dpu_trace.h"
41 
/* IOMMU port names attached/detached in _dpu_kms_mmu_init()/_destroy() */
static const char * const iommu_ports[] = {
		"mdp_0",
};
45 
46 /*
47  * To enable overall DRM driver logging
48  * # echo 0x2 > /sys/module/drm/parameters/debug
49  *
50  * To enable DRM driver h/w logging
51  * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
52  *
53  * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
54  */
55 #define DPU_DEBUGFS_DIR "msm_dpu"
56 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
57 
58 static int dpu_kms_hw_init(struct msm_kms *kms);
59 static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
60 
61 static unsigned long dpu_iomap_size(struct platform_device *pdev,
62 				    const char *name)
63 {
64 	struct resource *res;
65 
66 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
67 	if (!res) {
68 		DRM_ERROR("failed to get memory resource: %s\n", name);
69 		return 0;
70 	}
71 
72 	return resource_size(res);
73 }
74 
75 #ifdef CONFIG_DEBUG_FS
76 static int _dpu_danger_signal_status(struct seq_file *s,
77 		bool danger_status)
78 {
79 	struct dpu_kms *kms = (struct dpu_kms *)s->private;
80 	struct msm_drm_private *priv;
81 	struct dpu_danger_safe_status status;
82 	int i;
83 
84 	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
85 		DPU_ERROR("invalid arg(s)\n");
86 		return 0;
87 	}
88 
89 	priv = kms->dev->dev_private;
90 	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
91 
92 	pm_runtime_get_sync(&kms->pdev->dev);
93 	if (danger_status) {
94 		seq_puts(s, "\nDanger signal status:\n");
95 		if (kms->hw_mdp->ops.get_danger_status)
96 			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
97 					&status);
98 	} else {
99 		seq_puts(s, "\nSafe signal status:\n");
100 		if (kms->hw_mdp->ops.get_danger_status)
101 			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
102 					&status);
103 	}
104 	pm_runtime_put_sync(&kms->pdev->dev);
105 
106 	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
107 
108 	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
109 		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
110 				status.sspp[i]);
111 	seq_puts(s, "\n");
112 
113 	return 0;
114 }
115 
/*
 * Generate a read-only seq_file file_operations named <__prefix>_fops,
 * backed by an existing <__prefix>_show() function, plus the matching
 * single_open() open handler.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
128 
/* debugfs "danger_status" show callback: dumps the danger signal state */
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
134 
/* debugfs "safe_status" show callback: dumps the safe signal state */
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
140 
141 static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
142 {
143 	debugfs_remove_recursive(dpu_kms->debugfs_danger);
144 	dpu_kms->debugfs_danger = NULL;
145 }
146 
/*
 * dpu_debugfs_danger_init - create the "danger" debugfs directory under
 * @parent with danger_status/safe_status files
 *
 * Return: 0 on success, -EINVAL when the directory cannot be created.
 */
static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	dpu_kms->debugfs_danger = debugfs_create_dir("danger",
			parent);
	if (!dpu_kms->debugfs_danger) {
		DPU_ERROR("failed to create danger debugfs\n");
		return -EINVAL;
	}

	/* file creation errors are ignored; debugfs is best effort */
	debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
			dpu_kms, &dpu_debugfs_safe_stats_fops);

	return 0;
}
164 
/*
 * _dpu_debugfs_show_regset32 - seq_file show callback that hex-dumps the
 * register range described by the dpu_debugfs_regset32 in s->private
 * (regset->offset .. +blk_len within the mapped MDP region), 16 bytes per
 * row, under a runtime-PM reference.  Always returns 0; silently bails
 * out when any required handle is missing.
 */
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset;
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	dpu_kms = regset->dpu_kms;
	if (!dpu_kms || !dpu_kms->mmio)
		return 0;

	dev = dpu_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	/* registers must only be read while the hardware is powered */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		/* start a new row at every 16-byte boundary */
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
214 
/* open handler wiring the regset in i_private to the seq_file show op */
static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}
220 
/* read-only fops shared by all regset32 debugfs files */
static const struct file_operations dpu_fops_regset32 = {
	.open =		dpu_debugfs_open_regset32,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};
227 
228 void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
229 		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
230 {
231 	if (regset) {
232 		regset->offset = offset;
233 		regset->blk_len = length;
234 		regset->dpu_kms = dpu_kms;
235 	}
236 }
237 
/*
 * dpu_debugfs_create_regset32 - create a debugfs file that dumps @regset
 *
 * Return: the new dentry, or NULL when arguments are invalid.  The regset
 * must outlive the debugfs file since it is stored as file private data.
 */
void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent, struct dpu_debugfs_regset32 *regset)
{
	if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
		return NULL;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(regset->offset, 4);

	return debugfs_create_file(name, mode, parent,
			regset, &dpu_fops_regset32);
}
250 
/*
 * _dpu_debugfs_init - create the DPU "debug" directory and populate it
 * with the hw log mask, danger/safe, vbif, irq and perf entries.
 *
 * Return: 0 on success or a negative errno when a mandatory entry fails;
 * optional entries (danger/vbif/irq) are best effort.
 */
static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
{
	void *p;
	int rc;

	p = dpu_hw_util_get_log_mask_ptr();

	if (!dpu_kms || !p)
		return -EINVAL;

	dpu_kms->debugfs_root = debugfs_create_dir("debug",
					   dpu_kms->dev->primary->debugfs_root);
	if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
		DRM_ERROR("debugfs create_dir failed %ld\n",
			  PTR_ERR(dpu_kms->debugfs_root));
		return PTR_ERR(dpu_kms->debugfs_root);
	}

	rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
	if (rc) {
		DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
		return rc;
	}

	/* allow root to be NULL */
	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);

	/* optional entries: errors are intentionally ignored */
	(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
	(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
	(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);

	rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	return 0;
}
290 
291 static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
292 {
293 	/* don't need to NULL check debugfs_root */
294 	if (dpu_kms) {
295 		dpu_debugfs_vbif_destroy(dpu_kms);
296 		dpu_debugfs_danger_destroy(dpu_kms);
297 		dpu_debugfs_core_irq_destroy(dpu_kms);
298 		debugfs_remove_recursive(dpu_kms->debugfs_root);
299 	}
300 }
301 #else
/* stub used when debugfs support is compiled out */
static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
{
}
305 #endif
306 
/* msm_kms enable_vblank hook: forward to the DPU crtc implementation */
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}
311 
/* msm_kms disable_vblank hook: forward to the DPU crtc implementation */
static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}
316 
317 static void dpu_kms_prepare_commit(struct msm_kms *kms,
318 		struct drm_atomic_state *state)
319 {
320 	struct dpu_kms *dpu_kms;
321 	struct msm_drm_private *priv;
322 	struct drm_device *dev;
323 	struct drm_encoder *encoder;
324 
325 	if (!kms)
326 		return;
327 	dpu_kms = to_dpu_kms(kms);
328 	dev = dpu_kms->dev;
329 
330 	if (!dev || !dev->dev_private)
331 		return;
332 	priv = dev->dev_private;
333 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
334 
335 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
336 		if (encoder->crtc != NULL)
337 			dpu_encoder_prepare_commit(encoder);
338 }
339 
340 /*
341  * Override the encoder enable since we need to setup the inline rotator and do
342  * some crtc magic before enabling any bridge that might be present.
343  */
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
	struct drm_crtc *crtc = encoder->crtc;

	/* Forward this enable call to the commit hook */
	if (funcs && funcs->commit)
		funcs->commit(encoder);

	/* kick off the crtc only when it is attached and active */
	if (crtc && crtc->state->active) {
		trace_dpu_kms_enc_enable(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}
358 
/*
 * dpu_kms_commit - msm_kms commit hook: kick off every active crtc that is
 * not undergoing a modeset (modeset crtcs are kicked from encoder_enable).
 */
static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/* If modeset is required, kickoff is run in encoder_enable */
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			continue;

		if (crtc->state->active) {
			trace_dpu_kms_commit(DRMID(crtc));
			dpu_crtc_commit_kickoff(crtc);
		}
	}
}
376 
377 static void dpu_kms_complete_commit(struct msm_kms *kms,
378 		struct drm_atomic_state *old_state)
379 {
380 	struct dpu_kms *dpu_kms;
381 	struct msm_drm_private *priv;
382 	struct drm_crtc *crtc;
383 	struct drm_crtc_state *old_crtc_state;
384 	int i;
385 
386 	if (!kms || !old_state)
387 		return;
388 	dpu_kms = to_dpu_kms(kms);
389 
390 	if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
391 		return;
392 	priv = dpu_kms->dev->dev_private;
393 
394 	DPU_ATRACE_BEGIN("kms_complete_commit");
395 
396 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
397 		dpu_crtc_complete_commit(crtc, old_crtc_state);
398 
399 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
400 
401 	DPU_ATRACE_END("kms_complete_commit");
402 }
403 
/*
 * dpu_kms_wait_for_commit_done - block until every encoder attached to
 * @crtc signals COMMIT_DONE (e.g. vsync for video-mode panels).  Returns
 * early for disabled or inactive crtcs; -EWOULDBLOCK from the encoder is
 * treated as "nothing pending" and is not an error.
 */
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}
444 
/*
 * _dpu_kms_initialize_dsi - create one DSI encoder and run modeset init
 * for every populated DSI controller.  Errors are logged but not fatal;
 * the encoder is registered in priv->encoders before controller setup.
 */
static void _dpu_kms_initialize_dsi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	int i, rc;

	/*TODO: Support two independent DSI connectors */
	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
	if (IS_ERR_OR_NULL(encoder)) {
		DPU_ERROR("encoder init failed for dsi display\n");
		return;
	}

	priv->encoders[priv->num_encoders++] = encoder;

	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		/* a missing controller ends the scan (slots are in order) */
		if (!priv->dsi[i]) {
			DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
			return;
		}

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			continue;
		}
	}
}
475 
476 /**
477  * _dpu_kms_setup_displays - create encoders, bridges and connectors
478  *                           for underlying displays
479  * @dev:        Pointer to drm device structure
480  * @priv:       Pointer to private drm device data
481  * @dpu_kms:    Pointer to dpu kms structure
482  * Returns:     Zero on success
483  */
static void _dpu_kms_setup_displays(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	/* currently only DSI displays are supported */
	_dpu_kms_initialize_dsi(dev, priv, dpu_kms);

	/**
	 * Extend this function to initialize other
	 * types of displays
	 */
}
495 
/*
 * _dpu_kms_drm_obj_destroy - destroy all DRM objects created by
 * _dpu_kms_drm_obj_init() (crtcs, planes, connectors, encoders) and reset
 * the corresponding counters.  Also used as the obj_init error path.
 */
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!dpu_kms) {
		DPU_ERROR("invalid dpu_kms\n");
		return;
	} else if (!dpu_kms->dev) {
		DPU_ERROR("invalid dev\n");
		return;
	} else if (!dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid dev_private\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;
}
529 
/*
 * _dpu_kms_drm_obj_init - create all mode-setting DRM objects: encoders
 * and connectors (via the display drivers), one plane per SSPP, and one
 * crtc per encoder (limited by available mixers and primary planes).
 *
 * Return: 0 on success; on failure everything created so far is torn
 * down via _dpu_kms_drm_obj_destroy() and a negative errno is returned.
 */
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
		DPU_ERROR("invalid dpu_kms\n");
		return -EINVAL;
	}

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	_dpu_kms_setup_displays(dev, priv, dpu_kms);

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		/* cursor-capable pipes become cursor planes first, then
		 * remaining pipes fill primary slots, the rest are overlays
		 */
		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	/* cannot have more crtcs than primary planes were created */
	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_dpu_kms_drm_obj_destroy(dpu_kms);
	return ret;
}
612 
613 #ifdef CONFIG_DEBUG_FS
614 static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
615 {
616 	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
617 	struct drm_device *dev;
618 	int rc;
619 
620 	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
621 		DPU_ERROR("invalid dpu_kms\n");
622 		return -EINVAL;
623 	}
624 
625 	dev = dpu_kms->dev;
626 
627 	rc = _dpu_debugfs_init(dpu_kms);
628 	if (rc)
629 		DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
630 
631 	return rc;
632 }
633 #endif
634 
/* msm_kms round_pixclk hook: DPU accepts any pixel clock unchanged */
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
640 
/*
 * _dpu_kms_hw_destroy - tear down everything dpu_kms_hw_init() created,
 * in reverse order: interrupts, power event, debugfs, MMU, vbif blocks,
 * resource manager, catalog, power client and register mappings.  Each
 * handle is NULLed after release so repeated calls are safe.
 */
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	int i;

	dev = dpu_kms->dev;
	if (!dev)
		return;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	if (dpu_kms->power_event)
		dpu_power_handle_unregister_event(
				&dpu_kms->phandle, dpu_kms->power_event);

	/* safe to call these more than once during shutdown */
	_dpu_debugfs_destroy(dpu_kms);
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->core_client)
		dpu_power_client_destroy(&dpu_kms->phandle,
			dpu_kms->core_client);
	dpu_kms->core_client = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}
696 
/* msm_kms destroy hook: tear down the debug module then all DPU hardware */
static void dpu_kms_destroy(struct msm_kms *kms)
{
	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_dbg_destroy();
	_dpu_kms_hw_destroy(to_dpu_kms(kms));
}
711 
/*
 * dpu_kms_pm_suspend - msm_kms pm_suspend hook: duplicate the current
 * atomic state for restore on resume, then attempt to commit a state
 * with all CRTCs disabled.  Always returns 0.
 *
 * NOTE(review): num_crtcs is initialized to 0 and never incremented, so
 * the drm_atomic_commit() branch below appears unreachable — confirm
 * against the full driver history whether crtc counting was dropped.
 */
static int dpu_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct dpu_kms *dpu_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	DPU_ATRACE_BEGIN("kms_pm_suspend");

	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (dpu_kms->suspend_state)
		drm_atomic_state_put(dpu_kms->suspend_state);
	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
		DRM_ERROR("failed to back up suspend state\n");
		dpu_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (IS_ERR_OR_NULL(state)) {
		/* NOTE(review): ret stays 0 here, so failure is silent
		 * to the caller — presumably intentional best-effort.
		 */
		DRM_ERROR("failed to allocate crtc disable state\n");
		goto unlock;
	}

	state->acquire_ctx = &ctx;

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		drm_atomic_state_put(state);
		goto suspended;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		drm_atomic_state_put(state);
		goto unlock;
	}

suspended:
	dpu_kms->suspend_block = true;

unlock:
	if (ret == -EDEADLK) {
		/* lock contention: back off and retry the whole sequence */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	DPU_ATRACE_END("kms_pm_suspend");
	return 0;
}
790 
/*
 * dpu_kms_pm_resume - msm_kms pm_resume hook: re-commit the atomic state
 * saved by dpu_kms_pm_suspend() and re-enable hot-plug polling.  Always
 * returns 0; a failed restore is only logged.
 */
static int dpu_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct dpu_kms *dpu_kms;
	int ret;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));

	DPU_ATRACE_BEGIN("kms_pm_resume");

	drm_mode_config_reset(ddev);

	drm_modeset_lock_all(ddev);

	dpu_kms->suspend_block = false;

	if (dpu_kms->suspend_state) {
		dpu_kms->suspend_state->acquire_ctx =
			ddev->mode_config.acquire_ctx;
		ret = drm_atomic_commit(dpu_kms->suspend_state);
		if (ret < 0) {
			DRM_ERROR("failed to restore state, %d\n", ret);
			drm_atomic_state_put(dpu_kms->suspend_state);
		}
		/* commit consumed (or we dropped) the reference */
		dpu_kms->suspend_state = NULL;
	}
	drm_modeset_unlock_all(ddev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	DPU_ATRACE_END("kms_pm_resume");
	return 0;
}
832 
/*
 * _dpu_kms_set_encoder_mode - msm_kms set_encoder_mode hook: build a
 * msm_display_info (mode capabilities plus one h-tile per populated DSI
 * controller) and hand it to dpu_encoder_setup().  Failure is only logged.
 */
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
				 struct drm_encoder *encoder,
				 bool cmd_mode)
{
	struct msm_display_info info;
	struct msm_drm_private *priv = encoder->dev->dev_private;
	int i, rc = 0;

	memset(&info, 0, sizeof(info));

	info.intf_type = encoder->encoder_type;
	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
			MSM_DISPLAY_CAP_VID_MODE;

	/* TODO: No support for DSI swap */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		if (priv->dsi[i]) {
			info.h_tile_instance[info.num_of_h_tiles] = i;
			info.num_of_h_tiles++;
		}
	}

	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
	if (rc)
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
			encoder->base.id, rc);
}
860 
/* msm_kms callback table: the DPU backend's top-level KMS entry points */
static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_irq_uninstall,
	.irq             = dpu_irq,
	.prepare_commit  = dpu_kms_prepare_commit,
	.commit          = dpu_kms_commit,
	.complete_commit = dpu_kms_complete_commit,
	.wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.round_pixclk    = dpu_kms_round_pixclk,
	.pm_suspend      = dpu_kms_pm_suspend,
	.pm_resume       = dpu_kms_pm_resume,
	.destroy         = dpu_kms_destroy,
	.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};
884 
885 /* the caller api needs to turn on clock before calling it */
static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
{
	/* hardware revision register is at offset 0x0 of the MDP region */
	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
}
890 
891 static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
892 {
893 	struct msm_mmu *mmu;
894 
895 	mmu = dpu_kms->base.aspace->mmu;
896 
897 	mmu->funcs->detach(mmu, (const char **)iommu_ports,
898 			ARRAY_SIZE(iommu_ports));
899 	msm_gem_address_space_put(dpu_kms->base.aspace);
900 
901 	return 0;
902 }
903 
/*
 * _dpu_kms_mmu_init - allocate an IOMMU domain, wrap it in a GEM address
 * space and attach the DPU iommu ports.
 *
 * Return: 0 on success (including the no-IOMMU case, where the function
 * quietly does nothing), negative errno on failure.
 */
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return 0;

	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
			domain, "dpu1");
	if (IS_ERR(aspace)) {
		ret = PTR_ERR(aspace);
		goto fail;
	}

	dpu_kms->base.aspace = aspace;

	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
			ARRAY_SIZE(iommu_ports));
	if (ret) {
		DPU_ERROR("failed to attach iommu %d\n", ret);
		msm_gem_address_space_put(aspace);
		goto fail;
	}

	return 0;
fail:
	/* NOTE(review): on the aspace-create failure path base.aspace was
	 * never set — _dpu_kms_mmu_destroy() must tolerate a NULL aspace.
	 */
	_dpu_kms_mmu_destroy(dpu_kms);

	return ret;
}
937 
938 static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
939 		char *clock_name)
940 {
941 	struct dss_module_power *mp = &dpu_kms->mp;
942 	int i;
943 
944 	for (i = 0; i < mp->num_clk; i++) {
945 		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
946 			return &mp->clk_config[i];
947 	}
948 
949 	return NULL;
950 }
951 
/*
 * dpu_kms_get_clk_rate - return the current rate of the named DPU clock.
 *
 * NOTE(review): the return type is u64 but an unknown clock yields
 * -EINVAL, which callers see as a huge positive value — confirm callers
 * treat it as a sentinel rather than a rate.
 */
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct dss_clk *clk;

	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk->clk);
}
962 
963 static void dpu_kms_handle_power_event(u32 event_type, void *usr)
964 {
965 	struct dpu_kms *dpu_kms = usr;
966 
967 	if (!dpu_kms)
968 		return;
969 
970 	dpu_vbif_init_memtypes(dpu_kms);
971 }
972 
973 static int dpu_kms_hw_init(struct msm_kms *kms)
974 {
975 	struct dpu_kms *dpu_kms;
976 	struct drm_device *dev;
977 	struct msm_drm_private *priv;
978 	int i, rc = -EINVAL;
979 
980 	if (!kms) {
981 		DPU_ERROR("invalid kms\n");
982 		goto end;
983 	}
984 
985 	dpu_kms = to_dpu_kms(kms);
986 	dev = dpu_kms->dev;
987 	if (!dev) {
988 		DPU_ERROR("invalid device\n");
989 		goto end;
990 	}
991 
992 	rc = dpu_dbg_init(&dpu_kms->pdev->dev);
993 	if (rc) {
994 		DRM_ERROR("failed to init dpu dbg: %d\n", rc);
995 		goto end;
996 	}
997 
998 	priv = dev->dev_private;
999 	if (!priv) {
1000 		DPU_ERROR("invalid private data\n");
1001 		goto dbg_destroy;
1002 	}
1003 
1004 	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
1005 	if (IS_ERR(dpu_kms->mmio)) {
1006 		rc = PTR_ERR(dpu_kms->mmio);
1007 		DPU_ERROR("mdp register memory map failed: %d\n", rc);
1008 		dpu_kms->mmio = NULL;
1009 		goto error;
1010 	}
1011 	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
1012 	dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
1013 
1014 	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
1015 	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
1016 		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
1017 		DPU_ERROR("vbif register memory map failed: %d\n", rc);
1018 		dpu_kms->vbif[VBIF_RT] = NULL;
1019 		goto error;
1020 	}
1021 	dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
1022 	dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
1023 	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
1024 		dpu_kms->vbif[VBIF_NRT] = NULL;
1025 		DPU_DEBUG("VBIF NRT is not defined");
1026 	} else {
1027 		dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
1028 							     "vbif_nrt");
1029 	}
1030 
1031 	dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
1032 	if (IS_ERR(dpu_kms->reg_dma)) {
1033 		dpu_kms->reg_dma = NULL;
1034 		DPU_DEBUG("REG_DMA is not defined");
1035 	} else {
1036 		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
1037 	}
1038 
1039 	dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
1040 					"core");
1041 	if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
1042 		rc = PTR_ERR(dpu_kms->core_client);
1043 		if (!dpu_kms->core_client)
1044 			rc = -EINVAL;
1045 		DPU_ERROR("dpu power client create failed: %d\n", rc);
1046 		dpu_kms->core_client = NULL;
1047 		goto error;
1048 	}
1049 
1050 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
1051 
1052 	_dpu_kms_core_hw_rev_init(dpu_kms);
1053 
1054 	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
1055 
1056 	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
1057 	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
1058 		rc = PTR_ERR(dpu_kms->catalog);
1059 		if (!dpu_kms->catalog)
1060 			rc = -EINVAL;
1061 		DPU_ERROR("catalog init failed: %d\n", rc);
1062 		dpu_kms->catalog = NULL;
1063 		goto power_error;
1064 	}
1065 
1066 	dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
1067 
1068 	/*
1069 	 * Now we need to read the HW catalog and initialize resources such as
1070 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
1071 	 */
1072 	rc = _dpu_kms_mmu_init(dpu_kms);
1073 	if (rc) {
1074 		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
1075 		goto power_error;
1076 	}
1077 
1078 	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
1079 			dpu_kms->dev);
1080 	if (rc) {
1081 		DPU_ERROR("rm init failed: %d\n", rc);
1082 		goto power_error;
1083 	}
1084 
1085 	dpu_kms->rm_init = true;
1086 
1087 	dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
1088 	if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
1089 		rc = PTR_ERR(dpu_kms->hw_mdp);
1090 		if (!dpu_kms->hw_mdp)
1091 			rc = -EINVAL;
1092 		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
1093 		dpu_kms->hw_mdp = NULL;
1094 		goto power_error;
1095 	}
1096 
1097 	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
1098 		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
1099 
1100 		dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
1101 				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
1102 		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
1103 			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
1104 			if (!dpu_kms->hw_vbif[vbif_idx])
1105 				rc = -EINVAL;
1106 			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
1107 			dpu_kms->hw_vbif[vbif_idx] = NULL;
1108 			goto power_error;
1109 		}
1110 	}
1111 
1112 	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
1113 			&dpu_kms->phandle,
1114 			_dpu_kms_get_clk(dpu_kms, "core"));
1115 	if (rc) {
1116 		DPU_ERROR("failed to init perf %d\n", rc);
1117 		goto perf_err;
1118 	}
1119 
1120 	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
1121 	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
1122 		rc = PTR_ERR(dpu_kms->hw_intr);
1123 		DPU_ERROR("hw_intr init failed: %d\n", rc);
1124 		dpu_kms->hw_intr = NULL;
1125 		goto hw_intr_init_err;
1126 	}
1127 
1128 	/*
1129 	 * _dpu_kms_drm_obj_init should create the DRM related objects
1130 	 * i.e. CRTCs, planes, encoders, connectors and so forth
1131 	 */
1132 	rc = _dpu_kms_drm_obj_init(dpu_kms);
1133 	if (rc) {
1134 		DPU_ERROR("modeset init failed: %d\n", rc);
1135 		goto drm_obj_init_err;
1136 	}
1137 
1138 	dev->mode_config.min_width = 0;
1139 	dev->mode_config.min_height = 0;
1140 
1141 	/*
1142 	 * max crtc width is equal to the max mixer width * 2 and max height is
1143 	 * is 4K
1144 	 */
1145 	dev->mode_config.max_width =
1146 			dpu_kms->catalog->caps->max_mixer_width * 2;
1147 	dev->mode_config.max_height = 4096;
1148 
1149 	/*
1150 	 * Support format modifiers for compression etc.
1151 	 */
1152 	dev->mode_config.allow_fb_modifiers = true;
1153 
1154 	/*
1155 	 * Handle (re)initializations during power enable
1156 	 */
1157 	dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
1158 	dpu_kms->power_event = dpu_power_handle_register_event(
1159 			&dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
1160 			dpu_kms_handle_power_event, dpu_kms, "kms");
1161 
1162 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1163 
1164 	return 0;
1165 
1166 drm_obj_init_err:
1167 	dpu_core_perf_destroy(&dpu_kms->perf);
1168 hw_intr_init_err:
1169 perf_err:
1170 power_error:
1171 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
1172 error:
1173 	_dpu_kms_hw_destroy(dpu_kms);
1174 dbg_destroy:
1175 	dpu_dbg_destroy();
1176 end:
1177 	return rc;
1178 }
1179 
1180 struct msm_kms *dpu_kms_init(struct drm_device *dev)
1181 {
1182 	struct msm_drm_private *priv;
1183 	struct dpu_kms *dpu_kms;
1184 	int irq;
1185 
1186 	if (!dev || !dev->dev_private) {
1187 		DPU_ERROR("drm device node invalid\n");
1188 		return ERR_PTR(-EINVAL);
1189 	}
1190 
1191 	priv = dev->dev_private;
1192 	dpu_kms = to_dpu_kms(priv->kms);
1193 
1194 	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
1195 	if (irq < 0) {
1196 		DPU_ERROR("failed to get irq: %d\n", irq);
1197 		return ERR_PTR(irq);
1198 	}
1199 	dpu_kms->base.irq = irq;
1200 
1201 	return &dpu_kms->base;
1202 }
1203 
1204 static int dpu_bind(struct device *dev, struct device *master, void *data)
1205 {
1206 	struct drm_device *ddev = dev_get_drvdata(master);
1207 	struct platform_device *pdev = to_platform_device(dev);
1208 	struct msm_drm_private *priv = ddev->dev_private;
1209 	struct dpu_kms *dpu_kms;
1210 	struct dss_module_power *mp;
1211 	int ret = 0;
1212 
1213 	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
1214 	if (!dpu_kms)
1215 		return -ENOMEM;
1216 
1217 	mp = &dpu_kms->mp;
1218 	ret = msm_dss_parse_clock(pdev, mp);
1219 	if (ret) {
1220 		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
1221 		return ret;
1222 	}
1223 
1224 	dpu_power_resource_init(pdev, &dpu_kms->phandle);
1225 
1226 	platform_set_drvdata(pdev, dpu_kms);
1227 
1228 	msm_kms_init(&dpu_kms->base, &kms_funcs);
1229 	dpu_kms->dev = ddev;
1230 	dpu_kms->pdev = pdev;
1231 
1232 	pm_runtime_enable(&pdev->dev);
1233 	dpu_kms->rpm_enabled = true;
1234 
1235 	priv->kms = &dpu_kms->base;
1236 	return ret;
1237 }
1238 
1239 static void dpu_unbind(struct device *dev, struct device *master, void *data)
1240 {
1241 	struct platform_device *pdev = to_platform_device(dev);
1242 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1243 	struct dss_module_power *mp = &dpu_kms->mp;
1244 
1245 	dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
1246 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
1247 	devm_kfree(&pdev->dev, mp->clk_config);
1248 	mp->num_clk = 0;
1249 
1250 	if (dpu_kms->rpm_enabled)
1251 		pm_runtime_disable(&pdev->dev);
1252 }
1253 
/*
 * Component-framework callbacks: bind/unbind run when the master msm
 * DRM device assembles or tears down its component devices.
 */
static const struct component_ops dpu_ops = {
	.bind   = dpu_bind,
	.unbind = dpu_unbind,
};
1258 
1259 static int dpu_dev_probe(struct platform_device *pdev)
1260 {
1261 	return component_add(&pdev->dev, &dpu_ops);
1262 }
1263 
1264 static int dpu_dev_remove(struct platform_device *pdev)
1265 {
1266 	component_del(&pdev->dev, &dpu_ops);
1267 	return 0;
1268 }
1269 
1270 static int __maybe_unused dpu_runtime_suspend(struct device *dev)
1271 {
1272 	int rc = -1;
1273 	struct platform_device *pdev = to_platform_device(dev);
1274 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1275 	struct drm_device *ddev;
1276 	struct dss_module_power *mp = &dpu_kms->mp;
1277 
1278 	ddev = dpu_kms->dev;
1279 	if (!ddev) {
1280 		DPU_ERROR("invalid drm_device\n");
1281 		goto exit;
1282 	}
1283 
1284 	rc = dpu_power_resource_enable(&dpu_kms->phandle,
1285 			dpu_kms->core_client, false);
1286 	if (rc)
1287 		DPU_ERROR("resource disable failed: %d\n", rc);
1288 
1289 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
1290 	if (rc)
1291 		DPU_ERROR("clock disable failed rc:%d\n", rc);
1292 
1293 exit:
1294 	return rc;
1295 }
1296 
1297 static int __maybe_unused dpu_runtime_resume(struct device *dev)
1298 {
1299 	int rc = -1;
1300 	struct platform_device *pdev = to_platform_device(dev);
1301 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1302 	struct drm_device *ddev;
1303 	struct dss_module_power *mp = &dpu_kms->mp;
1304 
1305 	ddev = dpu_kms->dev;
1306 	if (!ddev) {
1307 		DPU_ERROR("invalid drm_device\n");
1308 		goto exit;
1309 	}
1310 
1311 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
1312 	if (rc) {
1313 		DPU_ERROR("clock enable failed rc:%d\n", rc);
1314 		goto exit;
1315 	}
1316 
1317 	rc = dpu_power_resource_enable(&dpu_kms->phandle,
1318 			dpu_kms->core_client, true);
1319 	if (rc)
1320 		DPU_ERROR("resource enable failed: %d\n", rc);
1321 
1322 exit:
1323 	return rc;
1324 }
1325 
/* Only runtime PM callbacks are provided; no runtime-idle hook. */
static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
};
1329 
/*
 * Device-tree match table; MODULE_DEVICE_TABLE exports it so userspace
 * can autoload the module for matching OF nodes.
 */
static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,sdm845-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
1335 
/* Platform driver for the DPU sub-device of the msm DRM stack. */
static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};
1345 
/* Register the DPU platform driver; called from the msm driver init path. */
void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}
1350 
/* Unregister the DPU platform driver; called from the msm driver exit path. */
void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}
1355