// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm.xml.h"

/*
 * DSI PLL 28nm - clock diagram (e.g.: DSI0):
 *
 *         dsi0analog_postdiv_clk
 *                             |         dsi0indirect_path_div2_clk
 *                             |          |
 *                   +------+  |  +----+  |  |\   dsi0byte_mux
 *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *                |  +------+     +----+     | m|  |  +----+
 *                |                          | u|--o--| /4 |-- dsi0pllbyte
 *                |                          | x|     +----+
 *                o--------------------------| /
 *                |                          |/
 *                |          +------+
 *                o----------| DIV3 |------------------------- dsi0pll
 *                           +------+
 */

#define POLL_MAX_READS			10
#define POLL_TIMEOUT_US			50

#define VCO_REF_CLK_RATE		19200000
#define VCO_MIN_RATE			350000000
#define VCO_MAX_RATE			750000000

/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP	BIT(0)

#define LPFR_LUT_SIZE			10
struct lpfr_cfg {
	unsigned long vco_rate;
	u32 resistance;
};

/* Loop filter resistance: */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv1;
	u8 byte_mux;
};

struct dsi_pll_28nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				u32 nb_tries, u32 timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->phy->pll_base;

	/*
	 * Add HW recommended delays after toggling the software
	 * reset bit off and back on.
	 */
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

	/* Force postdiv2 to be div-4 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

	/*
	 * Configure the loop filter resistance: use the first LUT entry
	 * whose vco_rate is at least the target rate.
	 */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
				rate);
		return -EINVAL;
	}
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

	/* Loop filter capacitance values: c1 and c2 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

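	/*
	 * Select integer or fractional-N mode: if the target VCO rate is an
	 * exact multiple of the 19.2 MHz reference, use integer mode;
	 * otherwise enable the reference doubler and keep a fractional
	 * component. div_fbx1000 is the feedback divider (relative to the
	 * effective reference) scaled by 1000.
	 */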
	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

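	/*
	 * Convert the fractional part of the feedback divider into a
	 * 16-bit SDM frequency seed.
	 */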
	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
	rem = 0;
	sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
	if (frac_n_mode) {
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

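	/*
	 * CAL_CFG11/CAL_CFG10 encode the generated VCO rate in MHz:
	 * CAL_CFG11 holds the number of whole 256 MHz steps and CAL_CFG10
	 * the remaining MHz.
	 */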
	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

	/* Add hardware recommended delay for correct PLL configuration */
	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		udelay(1000);
	else
		udelay(1);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

	/* Check to see if the ref clk doubler is enabled */
	doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

	/* see if it is integer mode or sdm mode */
	sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		/* integer mode */
		sdm_byp_div = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		/* sdm mode */
		sdm_dc_off = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

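		/* vco = ref * (DC_OFFSET + 1) + ref * freq_seed / 2^16 */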
		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

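	/*
	 * Toggle the lock detector and poll for lock. If the first attempt
	 * fails, reset the PLL and repeat the power-up sequence (with an
	 * extra LDO power-down/up toggle) one more time.
	 */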
	for (i = 0; i < 2; i++) {
		/* DSI Uniphy lock detect setting */
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				0x0c, 100);
		dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm,
						max_reads, timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	int i, ret;

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

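	/* Retry the full prepare sequence up to three times before giving up */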
	for (i = 0; i < 3; i++) {
		ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
		if (!ret) {
			pll_28nm->phy->pll_on = true;
			return 0;
		}
	}

	return ret;
}

static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	/* Toggle the DSI PLL lock detect setting */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}

static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(!pll_28nm->phy->pll_on))
		return;

	dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);

	pll_28nm->phy->pll_on = false;
}

static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	if      (rate < pll_28nm->phy->cfg->min_pll_rate)
		return  pll_28nm->phy->cfg->min_pll_rate;
	else if (rate > pll_28nm->phy->cfg->max_pll_rate)
		return  pll_28nm->phy->cfg->max_pll_rate;
	else
		return rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_hpm,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_lp,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */

static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;

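	/*
	 * Cache the post-divider and byte mux settings; record the VCO rate
	 * only if the PLL is currently locked, otherwise store 0.
	 */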
	cached_state->postdiv3 =
			dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	cached_state->postdiv1 =
			dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
	cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
	if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
		cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
	else
		cached_state->vco_rate = 0;
}

static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
			cached_state->postdiv3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			cached_state->postdiv1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
			cached_state->byte_mux);

	return 0;
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent1[32], parent2[32], vco_name[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref", .name = "xo",
		},
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

	DBG("%d", pll_28nm->phy->id);

	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
	else
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;

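	/*
	 * Register the VCO and the derived dividers/mux shown in the clock
	 * diagram at the top of this file.
	 */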
	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	pll_28nm->clk_hw.init = &vco_init;
	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider(dev, clk_name,
			parent1, CLK_SET_RATE_PARENT,
			pll_28nm->phy->pll_base +
			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			0, 4, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
			parent1, CLK_SET_RATE_PARENT,
			1, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider(dev, clk_name,
				parent1, 0, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
				0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	hw = devm_clk_hw_register_mux(dev, clk_name,
			((const char *[]){
				parent1, parent2
			}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
				parent1, CLK_SET_RATE_PARENT, 1, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	return 0;
}

static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_28nm *pll_28nm;
	int ret;

	if (!pdev)
		return -ENODEV;

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return -ENOMEM;

	pll_28nm->phy = phy;

	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_28nm->clk_hw;

	return 0;
}

static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
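	/* clk_zero is a 9-bit value; bit 8 lives in TIMING_CTRL_3 */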
	if (timing->clk_zero & BIT(8))
		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}

static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
}

static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);

	if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
	else
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
}

static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	if (!enable) {
		dsi_phy_write(phy->reg_base +
			      REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
		return;
	}

	if (phy->regulator_ldo_mode)
		dsi_28nm_phy_regulator_enable_ldo(phy);
	else
		dsi_28nm_phy_regulator_enable_dcdc(phy);
}

static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;
	u32 val;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);

	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_28nm_dphy_set_timing(phy, timing);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);

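	/*
	 * Reset the per-lane configuration registers and program the test
	 * strength values for all four data lanes.
	 */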
	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

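	/*
	 * Configure the HS bit clock select: cleared only for the slave PHY
	 * in a dual-DSI configuration, set in all other cases.
	 */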
	val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
		val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	else
		val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);

	return 0;
}

static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
	dsi_28nm_phy_regulator_ctrl(phy, false);

	/*
	 * Wait for the register writes to complete in order to ensure that
	 * the PHY is completely disabled.
	 */
	wmb();
}

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00, 0xfd923100 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a94400, 0x1a96400 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
	.has_phy_regulator = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vddio", 100000, 100},	/* 1.8 V */
		},
	},
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a98500 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};