// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_phy.h"
#include "dsi.xml.h"

/*
 * DSI PLL 28nm - clock diagram (e.g. DSI0):
 *
 *         dsi0analog_postdiv_clk
 *                             |         dsi0indirect_path_div2_clk
 *                             |          |
 *                   +------+  |  +----+  |  |\   dsi0byte_mux
 *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *                |  +------+     +----+     | m|  |  +----+
 *                |                          | u|--o--| /4 |-- dsi0pllbyte
 *                |                          | x|     +----+
 *                |                          | /
 *                o--------------------------| /
 *                |                          |/
 *                |          +------+
 *                o----------| DIV3 |------------------------- dsi0pll
 *                           +------+
 */
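/*
 * DIV1 is the analog post-divider (POSTDIV1_CFG), DIV3 is the pixel clock
 * post-divider (POSTDIV3_CFG), the byte mux select is bit 1 of VREG_CFG,
 * and the final /4 towards dsi0pllbyte is modelled as a fixed factor
 * (postdiv2 is forced to div-4 in set_rate below).
 */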

#define POLL_MAX_READS			10
#define POLL_TIMEOUT_US		50

#define VCO_REF_CLK_RATE		19200000
#define VCO_MIN_RATE			350000000
#define VCO_MAX_RATE			750000000

/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP	BIT(0)

#define LPFR_LUT_SIZE			10
struct lpfr_cfg {
	unsigned long vco_rate;
	u32 resistance;
};

/* Loop filter resistance: */
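/*
 * Each entry maps an upper VCO rate bound to a loop filter resistance code;
 * set_rate programs the resistance of the first entry whose vco_rate is
 * >= the requested rate (e.g. a 500 MHz VCO picks the 575.5 MHz entry, 8).
 */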
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv1;
	u8 byte_mux;
};

struct dsi_pll_28nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				u32 nb_tries, u32 timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->phy->pll_base;

	/*
	 * Add HW recommended delays after toggling the software
	 * reset bit off and back on.
	 */
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

	/* Force postdiv2 to be div-4 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

	/* Configure the Loop filter resistance */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
				rate);
		return -EINVAL;
	}
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

	/* Loop filter capacitance values: c1 and c2 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

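	/*
	 * The feedback divider is kept scaled by 1000 (div_fbx1000) so the
	 * fractional part survives integer division.  If the target rate is
	 * not an integer multiple of the 19.2 MHz reference, the reference
	 * doubler is enabled and the SDM runs in fractional-N mode,
	 * e.g. rate = 500000000: doubled ref of 38.4 MHz, div_fbx1000 =
	 * 500000000 / 38400 = 13020, integer part 13 and
	 * frac_n_value = (20 << 16) / 1000 = 1310.
	 */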
	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
	rem = 0;
	sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
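	/*
	 * Fractional mode: DC_OFFSET holds the integer part of the feedback
	 * divider minus one and FREQ_SEED the 16-bit fractional part.
	 * Integer mode: the SDM is bypassed and BYP_DIV holds the divider
	 * minus one.
	 */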
	if (frac_n_mode) {
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

	/* Add hardware recommended delay for correct PLL configuration */
	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		udelay(1000);
	else
		udelay(1);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

	/* Check to see if the ref clk doubler is enabled */
	doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

	/* see if it is integer mode or sdm mode */
	sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		/* integer mode */
		sdm_byp_div = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		/* sdm mode */
		sdm_dc_off = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

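		/* vco = ref * (DC_OFFSET + 1) + ref * FREQ_SEED / 2^16 */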
		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

	for (i = 0; i < 2; i++) {
		/* DSI Uniphy lock detect setting */
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				0x0c, 100);
		dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm,
						max_reads, timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	int i, ret;

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

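	/* Retry the full power-up sequence up to three times if the PLL fails to lock. */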
	for (i = 0; i < 3; i++) {
		ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
		if (!ret) {
			pll_28nm->phy->pll_on = true;
			return 0;
		}
	}

	return ret;
}

static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	/* DSI PLL toggle lock detect setting */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}

static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(!pll_28nm->phy->pll_on))
		return;

	dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);

	pll_28nm->phy->pll_on = false;
}

static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	if      (rate < pll_28nm->phy->cfg->min_pll_rate)
		return  pll_28nm->phy->cfg->min_pll_rate;
	else if (rate > pll_28nm->phy->cfg->max_pll_rate)
		return  pll_28nm->phy->cfg->max_pll_rate;
	else
		return rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_hpm,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_lp,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */

static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;

	cached_state->postdiv3 =
			dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	cached_state->postdiv1 =
			dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
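	/* The byte mux select lives in bit 1 of VREG_CFG, so cache the whole register. */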
	cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
	if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
		cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
	else
		cached_state->vco_rate = 0;
}

static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;
	int ret;

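	/* Reprogram the VCO rate and post-dividers captured by dsi_28nm_pll_save_state() */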
	ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
			cached_state->postdiv3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			cached_state->postdiv1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
			cached_state->byte_mux);

	return 0;
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent1[32], parent2[32], vco_name[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

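	/* Register the clock tree shown in the diagram at the top of this file. */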
	DBG("%d", pll_28nm->phy->id);

	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
	else
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	pll_28nm->clk_hw.init = &vco_init;
	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider(dev, clk_name,
			parent1, CLK_SET_RATE_PARENT,
			pll_28nm->phy->pll_base +
			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			0, 4, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
			parent1, CLK_SET_RATE_PARENT,
			1, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider(dev, clk_name,
				parent1, 0, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
				0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_mux(dev, clk_name,
			((const char *[]){
				parent1, parent2
			}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
				parent1, CLK_SET_RATE_PARENT, 1, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	return 0;
}

static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_28nm *pll_28nm;
	int ret;

	if (!pdev)
		return -ENODEV;

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return -ENOMEM;

	pll_28nm->phy = phy;

	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_28nm->clk_hw;

	return 0;
}

static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	if (timing->clk_zero & BIT(8))
		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}

static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
}

static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);

	if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
	else
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
}

static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	if (!enable) {
		dsi_phy_write(phy->reg_base +
			      REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
		return;
	}

	if (phy->regulator_ldo_mode)
		dsi_28nm_phy_regulator_enable_ldo(phy);
	else
		dsi_28nm_phy_regulator_enable_dcdc(phy);
}

static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;
	u32 val;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);

	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_28nm_dphy_set_timing(phy, timing);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);

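	/* Per-lane configuration for the four data lanes; the clock lane (LNCK) is programmed separately below. */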
	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

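	/*
	 * Select the HS bit clock source: when DSI1 acts as the slave in a
	 * dual-DSI configuration it must not select its own bit clock, so
	 * BITCLK_HS_SEL is cleared; in all other cases the bit is set.
	 */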
	val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
		val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	else
		val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);

	return 0;
}

static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
	dsi_28nm_phy_regulator_ctrl(phy, false);

	/*
	 * Wait for the register writes to complete in order to
	 * ensure that the phy is completely disabled
	 */
	wmb();
}

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00, 0xfd923100 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a94400, 0x1a96400 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},	/* 1.8 V */
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a98500 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};