1 /*
2  * SPDX-License-Identifier: GPL-2.0
3  * Copyright (c) 2018, The Linux Foundation
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/iopoll.h>
9 
10 #include "dsi_phy.h"
11 #include "dsi.xml.h"
12 #include "dsi_phy_10nm.xml.h"
13 
14 /*
15  * DSI PLL 10nm - clock diagram (eg: DSI0):
16  *
17  *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
18  *                              |                |
19  *                              |                |
20  *                 +---------+  |  +----------+  |  +----+
21  *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
22  *                 +---------+  |  +----------+  |  +----+
23  *                              |                |
24  *                              |                |         dsi0_pll_by_2_bit_clk
25  *                              |                |          |
26  *                              |                |  +----+  |  |\  dsi0_pclk_mux
27  *                              |                |--| /2 |--o--| \   |
28  *                              |                |  +----+     |  \  |  +---------+
29  *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
30  *                              |------------------------------|  /     +---------+
31  *                              |          +-----+             | /
32  *                              -----------| /4? |--o----------|/
33  *                                         +-----+  |           |
34  *                                                  |           |dsiclk_sel
35  *                                                  |
36  *                                                  dsi0_pll_post_out_div_clk
37  */
38 
39 #define VCO_REF_CLK_RATE		19200000
40 #define FRAC_BITS 18
41 
42 /* v3.0.0 10nm implementation that requires the old timings settings */
43 #define DSI_PHY_10NM_QUIRK_OLD_TIMINGS	BIT(0)
44 
/*
 * PLL configuration: SSC inputs plus the divider values derived from the
 * requested VCO rate (filled in by dsi_pll_calc_dec_frac/calc_ssc).
 */
struct dsi_pll_config {
	bool enable_ssc;	/* spread-spectrum clocking on/off */
	bool ssc_center;	/* sets the SSC_CENTER control bit when enabled */
	u32 ssc_freq;		/* SSC modulation frequency, Hz */
	u32 ssc_offset;		/* SSC spread, parts-per-million */
	u32 ssc_adj_per;

	/* out */
	u32 pll_prop_gain_rate;	/* loop proportional gain, picked by VCO rate */
	u32 decimal_div_start;	/* integer part of the feedback divider */
	u32 frac_div_start;	/* fractional part, FRAC_BITS wide */
	u32 pll_clock_inverters;
	u32 ssc_stepsize;	/* computed SSC step size register value */
	u32 ssc_div_per;	/* computed SSC modulation period register value */
};
60 
/*
 * Divider/mux register fields cached by dsi_10nm_pll_save_state() and
 * re-applied by dsi_10nm_pll_restore_state() across a power collapse.
 */
struct pll_10nm_cached_state {
	unsigned long vco_rate;	/* NOTE(review): not referenced in this file — confirm before removing */
	u8 bit_clk_div;		/* CMN_CLK_CFG0 bits [3:0] */
	u8 pix_clk_div;		/* CMN_CLK_CFG0 bits [7:4] */
	u8 pll_out_div;		/* PLL_OUTDIV_RATE bits [1:0] */
	u8 pll_mux;		/* CMN_CLK_CFG1 bits [1:0] */
};
68 
/* Per-instance PLL state, embedded clk_hw for the VCO clock */
struct dsi_pll_10nm {
	struct clk_hw clk_hw;		/* the dsiNvco_clk hardware clock */

	struct msm_dsi_phy *phy;	/* back-pointer to the owning PHY */

	/* last rate programmed via set_rate (or read back by recalc_rate) */
	u64 vco_current_rate;

	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
	spinlock_t postdiv_lock;

	struct pll_10nm_cached_state cached_state;

	/* set on the master PLL in bonded-DSI mode; NULL otherwise */
	struct dsi_pll_10nm *slave;
};
83 
84 #define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, clk_hw)
85 
86 /**
87  * struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
88  * @rescode_offset_top: Offset for pull-up legs rescode.
89  * @rescode_offset_bot: Offset for pull-down legs rescode.
90  * @vreg_ctrl: vreg ctrl to drive LDO level
91  */
struct dsi_phy_10nm_tuning_cfg {
	/* per-lane offsets, already packed into the 6-bit register format */
	u8 rescode_offset_top[DSI_LANE_MAX];
	u8 rescode_offset_bot[DSI_LANE_MAX];
	u8 vreg_ctrl;
};
97 
/*
 * Global list of private DSI PLL struct pointers, indexed by phy->id.
 * We need this for bonded DSI mode, where the master PLL's clk_ops needs
 * access the slave's private data
 */
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
103 
104 static void dsi_pll_setup_config(struct dsi_pll_config *config)
105 {
106 	config->ssc_freq = 31500;
107 	config->ssc_offset = 5000;
108 	config->ssc_adj_per = 2;
109 
110 	config->enable_ssc = false;
111 	config->ssc_center = false;
112 }
113 
114 static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
115 {
116 	u64 fref = VCO_REF_CLK_RATE;
117 	u64 pll_freq;
118 	u64 divider;
119 	u64 dec, dec_multiple;
120 	u32 frac;
121 	u64 multiplier;
122 
123 	pll_freq = pll->vco_current_rate;
124 
125 	divider = fref * 2;
126 
127 	multiplier = 1 << FRAC_BITS;
128 	dec_multiple = div_u64(pll_freq * multiplier, divider);
129 	dec = div_u64_rem(dec_multiple, multiplier, &frac);
130 
131 	if (pll_freq <= 1900000000UL)
132 		config->pll_prop_gain_rate = 8;
133 	else if (pll_freq <= 3000000000UL)
134 		config->pll_prop_gain_rate = 10;
135 	else
136 		config->pll_prop_gain_rate = 12;
137 	if (pll_freq < 1100000000UL)
138 		config->pll_clock_inverters = 8;
139 	else
140 		config->pll_clock_inverters = 0;
141 
142 	config->decimal_div_start = dec;
143 	config->frac_div_start = frac;
144 }
145 
146 #define SSC_CENTER		BIT(0)
147 #define SSC_EN			BIT(1)
148 
/*
 * Compute the SSC modulation period and step size from the divider values
 * produced by dsi_pll_calc_dec_frac(). No-op when SSC is disabled.
 */
static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u32 ssc_per;
	u32 ssc_mod;
	u64 ssc_step_size;
	u64 frac;

	if (!config->enable_ssc) {
		DBG("SSC not enabled\n");
		return;
	}

	/*
	 * Modulation period in ref-clock half cycles, trimmed down so that
	 * (ssc_per + 1) is a multiple of (ssc_adj_per + 1).
	 */
	ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
	ssc_per -= ssc_mod;

	/*
	 * Step size = full fixed-point divider (decimal + frac) scaled by the
	 * requested spread; ssc_offset is parts-per-million, hence the final
	 * divide by 1000000. The multiply chain is ordered to stay within u64.
	 */
	frac = config->frac_div_start;
	ssc_step_size = config->decimal_div_start;
	ssc_step_size *= (1 << FRAC_BITS);
	ssc_step_size += frac;
	ssc_step_size *= config->ssc_offset;
	ssc_step_size *= (config->ssc_adj_per + 1);
	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

	config->ssc_div_per = ssc_per;
	config->ssc_stepsize = ssc_step_size;

	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
		 config->decimal_div_start, frac, FRAC_BITS);
	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}
182 
/*
 * Write the computed SSC parameters to the PLL. Each 16-bit value is
 * split across a LOW/HIGH register pair; finally SSC is switched on with
 * the requested spread mode.
 */
static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	if (config->enable_ssc) {
		pr_debug("SSC is enabled\n");

		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
			  config->ssc_stepsize & 0xff);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
			  config->ssc_stepsize >> 8);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
			  config->ssc_div_per & 0xff);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
			  config->ssc_div_per >> 8);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
			  config->ssc_adj_per & 0xff);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
			  config->ssc_adj_per >> 8);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
			  SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
	}
}
206 
/*
 * Program the rate-independent ("hz-independent") PLL settings: analog
 * controls, calibration and loop-filter values. All values are fixed
 * hardware-recommended constants.
 */
static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
		  0xba);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
		  0x4c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
}
232 
/*
 * Program the rate-dependent divider values computed by
 * dsi_pll_calc_dec_frac() into the PLL registers.
 */
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
		  config->decimal_div_start);
	/* 18-bit fractional divider split as 8 + 8 + 2 bits */
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
		  config->frac_div_start & 0xff);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
		  (config->frac_div_start & 0xff00) >> 8);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
		  (config->frac_div_start & 0x30000) >> 16);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
		  config->pll_clock_inverters);
}
252 
/*
 * clk_ops.set_rate for the VCO: derive all divider/SSC values for the
 * requested rate and program them into the PLL. Always succeeds.
 */
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct dsi_pll_config config;

	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
	    parent_rate);

	/* Remember the target rate; vco_prepare() replays it on re-enable */
	pll_10nm->vco_current_rate = rate;

	dsi_pll_setup_config(&config);

	dsi_pll_calc_dec_frac(pll_10nm, &config);

	dsi_pll_calc_ssc(pll_10nm, &config);

	/* rate-dependent registers, then rate-independent, then SSC */
	dsi_pll_commit(pll_10nm, &config);

	dsi_pll_config_hzindep_reg(pll_10nm);

	dsi_pll_ssc_commit(pll_10nm, &config);

	/* flush, ensure all register writes are done*/
	wmb();

	return 0;
}
281 
282 static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
283 {
284 	struct device *dev = &pll->phy->pdev->dev;
285 	int rc;
286 	u32 status = 0;
287 	u32 const delay_us = 100;
288 	u32 const timeout_us = 5000;
289 
290 	rc = readl_poll_timeout_atomic(pll->phy->pll_base +
291 				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
292 				       status,
293 				       ((status & BIT(0)) > 0),
294 				       delay_us,
295 				       timeout_us);
296 	if (rc)
297 		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
298 			      pll->phy->id, status);
299 
300 	return rc;
301 }
302 
303 static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
304 {
305 	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
306 
307 	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
308 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
309 		  data & ~BIT(5));
310 	ndelay(250);
311 }
312 
313 static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
314 {
315 	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
316 
317 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
318 		  data | BIT(5));
319 	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
320 	ndelay(250);
321 }
322 
323 static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
324 {
325 	u32 data;
326 
327 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
328 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
329 		  data & ~BIT(5));
330 }
331 
332 static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
333 {
334 	u32 data;
335 
336 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
337 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
338 		  data | BIT(5));
339 }
340 
341 static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
342 {
343 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
344 	struct device *dev = &pll_10nm->phy->pdev->dev;
345 	int rc;
346 
347 	dsi_pll_enable_pll_bias(pll_10nm);
348 	if (pll_10nm->slave)
349 		dsi_pll_enable_pll_bias(pll_10nm->slave);
350 
351 	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
352 	if (rc) {
353 		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
354 		return rc;
355 	}
356 
357 	/* Start PLL */
358 	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
359 		  0x01);
360 
361 	/*
362 	 * ensure all PLL configurations are written prior to checking
363 	 * for PLL lock.
364 	 */
365 	wmb();
366 
367 	/* Check for PLL lock */
368 	rc = dsi_pll_10nm_lock_status(pll_10nm);
369 	if (rc) {
370 		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
371 		goto error;
372 	}
373 
374 	pll_10nm->phy->pll_on = true;
375 
376 	dsi_pll_enable_global_clk(pll_10nm);
377 	if (pll_10nm->slave)
378 		dsi_pll_enable_global_clk(pll_10nm->slave);
379 
380 	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
381 		  0x01);
382 	if (pll_10nm->slave)
383 		dsi_phy_write(pll_10nm->slave->phy->base +
384 			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
385 
386 error:
387 	return rc;
388 }
389 
/* Per-PLL teardown: disable the resync FIFO, then power down the bias. */
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
	dsi_pll_disable_pll_bias(pll);
}
395 
/*
 * clk_ops.unprepare for the VCO: gate the output clocks, stop the PLL,
 * and power down the master (and, in bonded mode, the slave) PLL.
 */
static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	/*
	 * To avoid any stray glitches while abruptly powering down the PLL
	 * make sure to gate the clock using the clock enable bit before
	 * powering down the PLL
	 */
	dsi_pll_disable_global_clk(pll_10nm);
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
	dsi_pll_disable_sub(pll_10nm);
	if (pll_10nm->slave) {
		dsi_pll_disable_global_clk(pll_10nm->slave);
		dsi_pll_disable_sub(pll_10nm->slave);
	}
	/* flush, ensure all register writes are done */
	wmb();
	pll_10nm->phy->pll_on = false;
}
416 
417 static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
418 						  unsigned long parent_rate)
419 {
420 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
421 	void __iomem *base = pll_10nm->phy->pll_base;
422 	u64 ref_clk = VCO_REF_CLK_RATE;
423 	u64 vco_rate = 0x0;
424 	u64 multiplier;
425 	u32 frac;
426 	u32 dec;
427 	u64 pll_freq, tmp64;
428 
429 	dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
430 	dec &= 0xff;
431 
432 	frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
433 	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
434 		  0xff) << 8);
435 	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
436 		  0x3) << 16);
437 
438 	/*
439 	 * TODO:
440 	 *	1. Assumes prescaler is disabled
441 	 */
442 	multiplier = 1 << FRAC_BITS;
443 	pll_freq = dec * (ref_clk * 2);
444 	tmp64 = (ref_clk * 2 * frac);
445 	pll_freq += div_u64(tmp64, multiplier);
446 
447 	vco_rate = pll_freq;
448 	pll_10nm->vco_current_rate = vco_rate;
449 
450 	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
451 	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
452 
453 	return (unsigned long)vco_rate;
454 }
455 
456 static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
457 		unsigned long rate, unsigned long *parent_rate)
458 {
459 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
460 
461 	if      (rate < pll_10nm->phy->cfg->min_pll_rate)
462 		return  pll_10nm->phy->cfg->min_pll_rate;
463 	else if (rate > pll_10nm->phy->cfg->max_pll_rate)
464 		return  pll_10nm->phy->cfg->max_pll_rate;
465 	else
466 		return rate;
467 }
468 
/* VCO clock ops registered with the common clock framework */
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
	.round_rate = dsi_pll_10nm_clk_round_rate,
	.set_rate = dsi_pll_10nm_vco_set_rate,
	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
	.prepare = dsi_pll_10nm_vco_prepare,
	.unprepare = dsi_pll_10nm_vco_unprepare,
};
476 
477 /*
478  * PLL Callbacks
479  */
480 
/*
 * Cache the post-divider and mux register fields so they can be
 * re-applied by dsi_10nm_pll_restore_state() after a power collapse.
 */
static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 cmn_clk_cfg0, cmn_clk_cfg1;

	/* PLL out divider: bits [1:0] of PLL_OUTDIV_RATE */
	cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	cached->pll_out_div &= 0x3;

	/* bit clock divider in [3:0], pixel clock divider in [7:4] */
	cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

	/* pclk mux selection: bits [1:0] of CLK_CFG1 */
	cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	cached->pll_mux = cmn_clk_cfg1 & 0x3;

	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
	    pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
	    cached->pix_clk_div, cached->pll_mux);
}
503 
/*
 * Re-apply the divider/mux fields cached by dsi_10nm_pll_save_state()
 * and reprogram the PLL for the last VCO rate. Returns 0 on success or
 * the set_rate error.
 */
static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 val;
	int ret;

	/* restore only the out-div bits, preserving the rest of the register */
	val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	val &= ~0x3;
	val |= cached->pll_out_div;
	dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
		  cached->bit_clk_div | (cached->pix_clk_div << 4));

	/* restore only the pclk mux bits [1:0] */
	val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	val &= ~0x3;
	val |= cached->pll_mux;
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

	ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
			pll_10nm->vco_current_rate,
			VCO_REF_CLK_RATE);
	if (ret) {
		DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	DBG("DSI PLL%d", pll_10nm->phy->id);

	return 0;
}
538 
539 static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
540 {
541 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
542 	void __iomem *base = phy->base;
543 	u32 data = 0x0;	/* internal PLL */
544 
545 	DBG("DSI PLL%d", pll_10nm->phy->id);
546 
547 	switch (phy->usecase) {
548 	case MSM_DSI_PHY_STANDALONE:
549 		break;
550 	case MSM_DSI_PHY_MASTER:
551 		pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
552 		break;
553 	case MSM_DSI_PHY_SLAVE:
554 		data = 0x1; /* external PLL */
555 		break;
556 	default:
557 		return -EINVAL;
558 	}
559 
560 	/* set PLL src */
561 	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
562 
563 	return 0;
564 }
565 
566 /*
567  * The post dividers and mux clocks are created using the standard divider and
568  * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
569  * state to follow the master PLL's divider/mux state. Therefore, we don't
570  * require special clock ops that also configure the slave PLL registers
571  */
/*
 * Register the VCO clock plus the divider/mux/fixed-factor clock tree
 * shown in the diagram at the top of this file. The byte and pixel
 * clocks are exported through @provided_clocks. All registrations are
 * devm-managed, so the fail path only propagates the error.
 */
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent[32], vco_name[32];
	char parent2[32], parent3[32], parent4[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref",
		},
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_10nm_vco,
	};
	struct device *dev = &pll_10nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

	DBG("DSI%d", pll_10nm->phy->id);

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
	pll_10nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);

	/* OUT_DIV: power-of-two divider on the VCO output */
	hw = devm_clk_hw_register_divider(dev, clk_name,
				     parent, CLK_SET_RATE_PARENT,
				     pll_10nm->phy->pll_base +
				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	/* BIT CLK: DIV_CTRL_3_0 */
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     CLK_SET_RATE_PARENT,
				     pll_10nm->phy->base +
				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     0, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	/* bit clock halved — one of the four pclk mux inputs */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 2);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	/* out_div clock divided by 4 — another pclk mux input */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 4);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);

	/* 4-way pixel clock source mux (dsiclk_sel, CLK_CFG1 bits [1:0]) */
	hw = devm_clk_hw_register_mux(dev, clk_name,
				 ((const char *[]){
				 parent, parent2, parent3, parent4
				 }), 4, 0, pll_10nm->phy->base +
				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
				 0, 2, 0, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);

	/* PIX CLK DIV : DIV_CTRL_7_4*/
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     0, pll_10nm->phy->base +
					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     4, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;

fail:

	return ret;
}
698 
699 static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
700 {
701 	struct platform_device *pdev = phy->pdev;
702 	struct dsi_pll_10nm *pll_10nm;
703 	int ret;
704 
705 	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
706 	if (!pll_10nm)
707 		return -ENOMEM;
708 
709 	DBG("DSI PLL%d", phy->id);
710 
711 	pll_10nm_list[phy->id] = pll_10nm;
712 
713 	spin_lock_init(&pll_10nm->postdiv_lock);
714 
715 	pll_10nm->phy = phy;
716 
717 	ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
718 	if (ret) {
719 		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
720 		return ret;
721 	}
722 
723 	phy->vco_hw = &pll_10nm->clk_hw;
724 
725 	/* TODO: Remove this when we have proper display handover support */
726 	msm_dsi_phy_pll_save_state(phy);
727 
728 	return 0;
729 }
730 
731 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
732 {
733 	void __iomem *base = phy->base;
734 	u32 data = 0;
735 
736 	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
737 	mb(); /* make sure read happened */
738 
739 	return (data & BIT(0));
740 }
741 
742 static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
743 {
744 	void __iomem *lane_base = phy->lane_base;
745 	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
746 
747 	/*
748 	 * LPRX and CDRX need to enabled only for physical data lane
749 	 * corresponding to the logical data lane 0
750 	 */
751 	if (enable)
752 		dsi_phy_write(lane_base +
753 			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
754 	else
755 		dsi_phy_write(lane_base +
756 			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
757 }
758 
/*
 * Program the per-lane settings: strength control, LPRX/CDRX, config
 * registers, platform drive-strength offsets and TX_DCTRL. Lane 4 is the
 * clock lane (different CFG3/TX_DCTRL values). The final TX_DCTRL toggle
 * is skipped on parts with the old-timings quirk.
 */
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;
	struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;

	if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
			      0x55);
		/*
		 * Disable LPRX and CDRX for all lanes. And later on, it will
		 * be only enabled for the physical data lane corresponding
		 * to the logical data lane 0
		 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
			      0x88);
	}

	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
			      i == 4 ? 0x80 : 0x0);

		/* platform specific dsi phy drive strength adjustment */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
				tuning_cfg->rescode_offset_top[i]);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
				tuning_cfg->rescode_offset_bot[i]);

		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
			      tx_dctrl[i]);
	}

	if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/0 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
	}
}
810 
/*
 * Power up and configure the DSI PHY: calculate and program the D-PHY
 * timings for the requested clock, bring the analog blocks and lanes out
 * of power-down, and set the PLL usecase. Returns 0 on success or a
 * negative error code.
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO with platform specific drive level/amplitude adjustment */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
		      tuning_cfg->vreg_ctrl);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = dsi_10nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}
918 
/*
 * Power the PHY down: disable LPRX/CDRX, power down all lanes and then
 * every PHY block. Warns if the PLL is still running at this point.
 */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* disable all lanes */
	data &= ~0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();

	DBG("DSI%d PHY disabled", phy->id);
}
944 
945 static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
946 {
947 	struct device *dev = &phy->pdev->dev;
948 	struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
949 	s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
950 	s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
951 	u32 ldo_level = 400; /* 400mV */
952 	u8 level;
953 	int ret, i;
954 
955 	tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
956 	if (!tuning_cfg)
957 		return -ENOMEM;
958 
959 	/* Drive strength adjustment parameters */
960 	ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
961 					offset_top, DSI_LANE_MAX);
962 	if (ret && ret != -EINVAL) {
963 		DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
964 		return ret;
965 	}
966 
967 	for (i = 0; i < DSI_LANE_MAX; i++) {
968 		if (offset_top[i] < -32 || offset_top[i] > 31) {
969 			DRM_DEV_ERROR(dev,
970 				"qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
971 				offset_top[i]);
972 			return -EINVAL;
973 		}
974 		tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
975 	}
976 
977 	ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
978 					offset_bot, DSI_LANE_MAX);
979 	if (ret && ret != -EINVAL) {
980 		DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
981 		return ret;
982 	}
983 
984 	for (i = 0; i < DSI_LANE_MAX; i++) {
985 		if (offset_bot[i] < -32 || offset_bot[i] > 31) {
986 			DRM_DEV_ERROR(dev,
987 				"qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
988 				offset_bot[i]);
989 			return -EINVAL;
990 		}
991 		tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
992 	}
993 
994 	/* Drive level/amplitude adjustment parameters */
995 	ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
996 	if (ret && ret != -EINVAL) {
997 		DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
998 		return ret;
999 	}
1000 
1001 	switch (ldo_level) {
1002 	case 375:
1003 		level = 0;
1004 		break;
1005 	case 400:
1006 		level = 1;
1007 		break;
1008 	case 425:
1009 		level = 2;
1010 		break;
1011 	case 450:
1012 		level = 3;
1013 		break;
1014 	case 475:
1015 		level = 4;
1016 		break;
1017 	case 500:
1018 		level = 5;
1019 		break;
1020 	default:
1021 		DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
1022 		return -EINVAL;
1023 	}
1024 	tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);
1025 
1026 	phy->tuning_cfg = tuning_cfg;
1027 
1028 	return 0;
1029 }
1030 
/* 10nm DSI PHY configuration (current timings, no quirks) */
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
		.parse_dt_properties = dsi_10nm_phy_parse_dt,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
1052 
/*
 * 8998 variant of the 10nm PHY: identical ops but different register
 * addresses and the old-timings quirk (v3.0.0 timing settings).
 */
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
		.parse_dt_properties = dsi_10nm_phy_parse_dt,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};
1075