1 /*
2  * SPDX-License-Identifier: GPL-2.0
3  * Copyright (c) 2018, The Linux Foundation
4  */
5 
6 #include <linux/iopoll.h>
7 
8 #include "dsi_phy.h"
9 #include "dsi.xml.h"
10 
11 static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
12 {
13 	void __iomem *base = phy->base;
14 	u32 data = 0;
15 
16 	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
17 	mb(); /* make sure read happened */
18 
19 	return (data & BIT(0));
20 }
21 
22 static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
23 {
24 	void __iomem *lane_base = phy->lane_base;
25 	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
26 
27 	/*
28 	 * LPRX and CDRX need to enabled only for physical data lane
29 	 * corresponding to the logical data lane 0
30 	 */
31 	if (enable)
32 		dsi_phy_write(lane_base +
33 			      REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
34 	else
35 		dsi_phy_write(lane_base +
36 			      REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
37 }
38 
39 static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
40 {
41 	int i;
42 	const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
43 	const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
44 	const u8 *tx_dctrl = tx_dctrl_0;
45 	void __iomem *lane_base = phy->lane_base;
46 
47 	if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
48 		tx_dctrl = tx_dctrl_1;
49 
50 	/* Strength ctrl settings */
51 	for (i = 0; i < 5; i++) {
52 		/*
53 		 * Disable LPRX and CDRX for all lanes. And later on, it will
54 		 * be only enabled for the physical data lane corresponding
55 		 * to the logical data lane 0
56 		 */
57 		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
58 		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
59 	}
60 
61 	dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
62 
63 	/* other settings */
64 	for (i = 0; i < 5; i++) {
65 		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
66 		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
67 		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
68 		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
69 	}
70 }
71 
/*
 * Power up and configure the DSI PHY (D-PHY mode only; CPHY is a TODO).
 *
 * Sequence: compute v4 D-PHY timings from @clk_req, wait for the
 * refgen to signal ready, program the common/global registers (several
 * values depend on PHY type and on whether the bit clock is at or
 * below 1.5 GHz), set the PLL usecase, write the timing registers and
 * finally the per-lane settings.
 *
 * @src_pll_id is not used by this function.
 *
 * Returns 0 on success; -EINVAL if timing calculation fails or refgen
 * never becomes ready; otherwise the error from
 * msm_dsi_pll_set_usecase().
 */
static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
			      struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	bool less_than_1500_mhz;
	u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
	u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	/* Configuring with the PLL already running is unexpected but not fatal */
	if (dsi_phy_hw_v4_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY (bit 0 of PHY_STATUS) */
	ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* TODO: CPHY enable path (this is for DPHY only) */

	/* Alter PHY configurations if data rate is 1.5 GHz or lower */
	less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);

	if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
		vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
		glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d :  0x00;
		glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 :  0x3c;
		glbl_str_swi_cal_sel_ctrl = 0x00;
		glbl_hstx_str_ctrl_0 = 0x88;
	} else {
		vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
		glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
		glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
		glbl_rescode_top_ctrl = 0x03;
		glbl_rescode_bot_ctrl = 0x3c;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* program CMN_CTRL_4 for minor_ver 2 chipsets*/
	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
	data = data & (0xf0);
	if (data == 0x20)
		dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
		      glbl_str_swi_cal_sel_ctrl);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
		      glbl_hstx_str_ctrl_0);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
		      glbl_rescode_top_ctrl);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
		      glbl_rescode_bot_ctrl);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);

	/* Enable all 5 lanes */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);

	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
		      timing->shared_timings.clk_pre);
	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
		      timing->shared_timings.clk_post);

	/* DSI lane settings */
	dsi_phy_hw_v4_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}
200 
201 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
202 {
203 	void __iomem *base = phy->base;
204 	u32 data;
205 
206 	DBG("");
207 
208 	if (dsi_phy_hw_v4_0_is_pll_on(phy))
209 		pr_warn("Turning OFF PHY while PLL is on\n");
210 
211 	dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
212 	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
213 
214 	/* disable all lanes */
215 	data &= ~0x1F;
216 	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
217 	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0);
218 
219 	/* Turn off all PHY blocks */
220 	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00);
221 	/* make sure phy is turned off */
222 	wmb();
223 
224 	DBG("DSI%d PHY disabled", phy->id);
225 }
226 
227 static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
228 {
229 	struct platform_device *pdev = phy->pdev;
230 
231 	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
232 				     "DSI_PHY_LANE");
233 	if (IS_ERR(phy->lane_base)) {
234 		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
235 			__func__);
236 		return -ENOMEM;
237 	}
238 
239 	return 0;
240 }
241 
/* Configuration for the 7nm V4.1 PHY: two instances, one "vdds" supply. */
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
	.type = MSM_DSI_PHY_7NM_V4_1,
	.src_pll_truthtable = { {false, false}, {true, false} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_7nm_phy_enable,
		.disable = dsi_7nm_phy_disable,
		.init = dsi_7nm_phy_init,
	},
	/* Register base addresses of the two PHY instances */
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
259 
/*
 * Configuration for the 7nm (pre-V4.1) PHY used on 8150; identical to
 * dsi_phy_7nm_cfgs except for the PHY type.
 */
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
	.type = MSM_DSI_PHY_7NM,
	.src_pll_truthtable = { {false, false}, {true, false} },
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_7nm_phy_enable,
		.disable = dsi_7nm_phy_disable,
		.init = dsi_7nm_phy_init,
	},
	/* Register base addresses of the two PHY instances */
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
277