// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/of_irq.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
	 *
	 * The downstream fbdev driver gets these register offsets/values
	 * from DT.  It is not clear what these registers are, or whether
	 * different boards/SoCs need different values; presumably they
	 * are the "golden" registers.
	 *
	 * Not setting them does not seem to cause any problem, but we may
	 * just be getting lucky with the bootloader initializing them for
	 * us.  OTOH, if we can always count on the bootloader setting the
	 * golden registers, then perhaps we don't need to care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	return to_mdp5_global_state(mdp5_kms->glob_state.state);
}

/*
 * This acquires the modeset lock set aside for global state and creates
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}
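
/*
 * Usage note (sketch): code running in the atomic check path calls
 * mdp5_get_global_state(state) to obtain a writable copy tracked by the
 * atomic state, while code running after the state swap (e.g. the commit
 * helpers below) uses mdp5_get_existing_global_state() to look at the
 * state currently in effect.
 */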

static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}

static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};

static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}

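/*
 * enable_commit()/disable_commit() bracket an atomic commit: they keep the
 * MDP5 device powered (via runtime PM) for the duration of the commit.
 */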
static void mdp5_enable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	pm_runtime_get_sync(&mdp5_kms->pdev->dev);
}

static void mdp5_disable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	pm_runtime_put_sync(&mdp5_kms->pdev->dev);
}

static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}

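/*
 * Wait until the previously flushed updates have actually taken effect on
 * each CRTC in crtc_mask before the commit is allowed to complete.
 */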
static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
		mdp5_crtc_wait_for_commit_done(crtc);
}

static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}

static int mdp5_set_split_display(struct msm_kms *kms,
				  struct drm_encoder *encoder,
				  struct drm_encoder *slave_encoder,
				  bool is_cmd_mode)
{
	if (is_cmd_mode)
		return mdp5_cmd_encoder_set_split_display(encoder,
							  slave_encoder);
	else
		return mdp5_vid_encoder_set_split_display(encoder,
							  slave_encoder);
}

static void mdp5_destroy(struct mdp5_kms *mdp5_kms);

static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	mdp_kms_destroy(&mdp5_kms->base);
	mdp5_destroy(mdp5_kms);
}

#ifdef CONFIG_DEBUG_FS
static int smp_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_printer p = drm_seq_file_printer(m);

	if (!mdp5_kms->smp) {
		drm_printf(&p, "no SMP pool\n");
		return 0;
	}

	mdp5_smp_dump(mdp5_kms->smp, &p);

	return 0;
}

static struct drm_info_list mdp5_debugfs_list[] = {
	{"smp", smp_show },
};

static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	drm_debugfs_create_files(mdp5_debugfs_list,
				 ARRAY_SIZE(mdp5_debugfs_list),
				 minor->debugfs_root, minor);

	return 0;
}
#endif

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp5_hw_init,
		.irq_preinstall = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall = mdp5_irq_uninstall,
		.irq = mdp5_irq,
		.enable_vblank = mdp5_enable_vblank,
		.disable_vblank = mdp5_disable_vblank,
		.flush_commit = mdp5_flush_commit,
		.enable_commit = mdp5_enable_commit,
		.disable_commit = mdp5_disable_commit,
		.prepare_commit = mdp5_prepare_commit,
		.wait_flush = mdp5_wait_flush,
		.complete_commit = mdp5_complete_commit,
		.get_format = mdp_get_format,
		.set_split_display = mdp5_set_split_display,
		.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init = mdp5_kms_debugfs_init,
#endif
	},
	.set_irqmask = mdp5_set_irqmask,
};

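/*
 * mdp5_enable()/mdp5_disable() are called from the runtime-PM resume and
 * suspend hooks below; they only gate the MDP5 clocks.  enable_count is a
 * sanity counter (see the WARN_ON in mdp5_disable()).
 */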
static int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
	clk_disable_unprepare(mdp5_kms->tbu_clk);
	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

static int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);
	clk_prepare_enable(mdp5_kms->tbu_clk);
	clk_prepare_enable(mdp5_kms->tbu_rt_clk);

	return 0;
}

static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
					     struct mdp5_interface *intf,
					     struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct drm_encoder *encoder;

	encoder = mdp5_encoder_init(dev, intf, ctl);
	if (IS_ERR(encoder)) {
		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
		return encoder;
	}

	return encoder;
}

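/*
 * Translate an INTF number into a DSI instance id by counting how many DSI
 * interfaces come before it in the hw config's connect table.
 */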
static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
	int id = 0, i;

	for (i = 0; i < intf_cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (intf_num == i)
				return id;

			id++;
		}
	}

	return -EINVAL;
}

static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				      intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (!ret)
			mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));

		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

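/*
 * modeset_init() wires up the DRM mode objects: an encoder per configured
 * interface, a plane per hardware pipe, and a CRTC per encoder (limited by
 * the number of layer mixers).
 */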
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	/*
	 * Construct encoders and perform modeset init of the connector
	 * devices for each external display interface.
	 */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	/*
	 * Ideally we would have fewer encoders (set up by parsing the MDP5
	 * interfaces) than layer mixers present in HW, but let's be safe
	 * here anyway.
	 */
	num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for the
	 * N encoders set up by the driver. The first N planes become primary
	 * planes for the CRTCs, with the remainder as overlay planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}

		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->num_crtcs++;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the possible
	 * crtcs for the encoders
	 */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}

static int get_clk(struct platform_device *pdev, struct clk **clkp,
		   const char *name, bool mandatory)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = msm_clk_get(pdev, name);
	if (IS_ERR(clk) && mandatory) {
		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	if (IS_ERR(clk))
		DBG("skipping %s", name);
	else
		*clkp = clk;

	return 0;
}

static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);

static int mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms;
	struct msm_gem_address_space *aspace;
	int irq, i, ret;

	ret = mdp5_init(to_platform_device(dev->dev), dev);
	if (ret)
		return ret;

	/* priv->kms would have been populated by the MDP5 driver */
	kms = priv->kms;
	if (!kms)
		return -ENOMEM;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	pdev = mdp5_kms->pdev;

	ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
		goto fail;
	}

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
		goto fail;
	}

	kms->irq = irq;

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	pm_runtime_get_sync(&pdev->dev);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdelay(16);

	aspace = msm_kms_init_aspace(mdp5_kms->dev);
	if (IS_ERR(aspace)) {
		ret = PTR_ERR(aspace);
		goto fail;
	}

	kms->aspace = aspace;

	pm_runtime_put_sync(&pdev->dev);

	ret = modeset_init(mdp5_kms);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
	dev->vblank_disable_immediate = true;

	return 0;
fail:
	if (kms)
		mdp5_kms_destroy(kms);

	return ret;
}

static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&mdp5_kms->pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}

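/*
 * Register one class of SSPP pipes (RGB/VIG/DMA/CURSOR) with the given
 * register offsets and capability flags; used by hwpipe_init() below.
 */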
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
			   const enum mdp5_pipe *pipes, const uint32_t *offsets,
			   uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
				      pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}

static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe rgb_planes[] = {
		SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe vig_planes[] = {
		SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	static const enum mdp5_pipe dma_planes[] = {
		SSPP_DMA0, SSPP_DMA1,
	};
	static const enum mdp5_pipe cursor_planes[] = {
		SSPP_CURSOR0, SSPP_CURSOR1,
	};
	const struct mdp5_cfg_hw *hw_cfg;
	int ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* Construct RGB pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
			      hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
	if (ret)
		return ret;

	/* Construct video (VIG) pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
			      hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
	if (ret)
		return ret;

	/* Construct DMA pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
			      hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
	if (ret)
		return ret;

	/* Construct cursor pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
			      cursor_planes, hw_cfg->pipe_cursor.base,
			      hw_cfg->pipe_cursor.caps);
	if (ret)
		return ret;

	return 0;
}

static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				      i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}

static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}

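/*
 * Early, pre-modeset bring-up: map the MDP5 registers, grab clocks, read
 * the hardware revision, and instantiate the cfg, SMP, CTL manager, pipe,
 * mixer and interface state that mdp5_kms_init() builds upon.
 */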
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		ret = -ENOMEM;
		goto fail;
	}

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
	get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
	get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	/* set uninit-ed kms */
	priv->kms = &mdp5_kms->base.base;

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	return 0;
fail:
	if (mdp5_kms)
		mdp5_destroy(mdp5_kms);
	return ret;
}

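/*
 * Request the MDP->memory interconnect paths and vote a generous bandwidth
 * so that scanout does not underflow (the bootloader may not have left the
 * bus clocks high enough).
 */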
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
	struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
	struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
	struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");

	if (IS_ERR(path0))
		return PTR_ERR(path0);

	if (!path0) {
		/* no interconnect support is not necessarily a fatal
		 * condition, the platform may simply not have an
		 * interconnect driver yet.  But warn about it in case
		 * bootloader didn't setup bus clocks high enough for
		 * scanout.
		 */
		dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
		return 0;
	}

	icc_set_bw(path0, 0, MBps_to_icc(6400));

	if (!IS_ERR_OR_NULL(path1))
		icc_set_bw(path1, 0, MBps_to_icc(6400));
	if (!IS_ERR_OR_NULL(path_rot))
		icc_set_bw(path_rot, 0, MBps_to_icc(6400));

	return 0;
}

static int mdp5_dev_probe(struct platform_device *pdev)
{
	int ret;

	DBG("");

	ret = mdp5_setup_interconnect(pdev);
	if (ret)
		return ret;

	return msm_drv_probe(&pdev->dev, mdp5_kms_init);
}

static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_master_del(&pdev->dev, &msm_drm_ops);
	return 0;
}

static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));

	DBG("");

	return mdp5_disable(mdp5_kms);
}

static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));

	DBG("");

	return mdp5_enable(mdp5_kms);
}

static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);

static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};

void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}

void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}