1 /*
2  * SPDX-License-Identifier: GPL-2.0
3  * Copyright (c) 2018, The Linux Foundation
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/iopoll.h>
9 
10 #include "dsi_phy.h"
11 #include "dsi.xml.h"
12 #include "dsi_phy_10nm.xml.h"
13 
14 /*
15  * DSI PLL 10nm - clock diagram (eg: DSI0):
16  *
17  *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
18  *                              |                |
19  *                              |                |
20  *                 +---------+  |  +----------+  |  +----+
21  *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
22  *                 +---------+  |  +----------+  |  +----+
23  *                              |                |
24  *                              |                |         dsi0_pll_by_2_bit_clk
25  *                              |                |          |
26  *                              |                |  +----+  |  |\  dsi0_pclk_mux
27  *                              |                |--| /2 |--o--| \   |
28  *                              |                |  +----+     |  \  |  +---------+
29  *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
30  *                              |------------------------------|  /     +---------+
31  *                              |          +-----+             | /
32  *                              -----------| /4? |--o----------|/
33  *                                         +-----+  |           |
34  *                                                  |           |dsiclk_sel
35  *                                                  |
36  *                                                  dsi0_pll_post_out_div_clk
37  */
38 
39 #define VCO_REF_CLK_RATE		19200000
40 #define FRAC_BITS 18
41 
42 /* v3.0.0 10nm implementation that requires the old timings settings */
43 #define DSI_PHY_10NM_QUIRK_OLD_TIMINGS	BIT(0)
44 
/*
 * Inputs and computed outputs for one PLL programming pass.
 * The "out" members are filled in by dsi_pll_calc_dec_frac() /
 * dsi_pll_calc_ssc() and written to hardware by dsi_pll_commit() /
 * dsi_pll_ssc_commit().
 */
struct dsi_pll_config {
	bool enable_ssc;	/* spread-spectrum clocking on/off */
	bool ssc_center;	/* center-spread when true (SSC_CENTER bit) */
	u32 ssc_freq;		/* SSC modulation frequency */
	u32 ssc_offset;		/* spread amount (see dsi_pll_calc_ssc) */
	u32 ssc_adj_per;

	/* out */
	u32 pll_prop_gain_rate;	/* computed but not written by dsi_pll_commit() */
	u32 decimal_div_start;	/* integer part of the feedback divider */
	u32 frac_div_start;	/* FRAC_BITS-wide fractional part */
	u32 pll_clock_inverters;
	u32 ssc_stepsize;	/* per-step SSC adjustment */
	u32 ssc_div_per;	/* SSC modulation period */
};
60 
/*
 * Divider/mux programming captured by dsi_10nm_pll_save_state() and
 * reapplied by dsi_10nm_pll_restore_state() across a power cycle.
 */
struct pll_10nm_cached_state {
	unsigned long vco_rate;
	u8 bit_clk_div;		/* CMN_CLK_CFG0 bits 3:0 */
	u8 pix_clk_div;		/* CMN_CLK_CFG0 bits 7:4 */
	u8 pll_out_div;		/* PLL_OUTDIV_RATE bits 1:0 */
	u8 pll_mux;		/* CMN_CLK_CFG1 bits 1:0 */
};
68 
/* Per-PHY PLL instance; embeds the VCO clk_hw registered with the CCF */
struct dsi_pll_10nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	/* last VCO rate set via set_rate or read back via recalc_rate */
	u64 vco_current_rate;

	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
	spinlock_t postdiv_lock;

	struct pll_10nm_cached_state cached_state;

	/* other PHY's PLL when this one is the bonded-DSI master */
	struct dsi_pll_10nm *slave;
};
83 
84 #define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, clk_hw)
85 
86 /*
87  * Global list of private DSI PLL struct pointers. We need this for bonded DSI
88  * mode, where the master PLL's clk_ops needs access the slave's private data
89  */
90 static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
91 
92 static void dsi_pll_setup_config(struct dsi_pll_config *config)
93 {
94 	config->ssc_freq = 31500;
95 	config->ssc_offset = 5000;
96 	config->ssc_adj_per = 2;
97 
98 	config->enable_ssc = false;
99 	config->ssc_center = false;
100 }
101 
102 static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
103 {
104 	u64 fref = VCO_REF_CLK_RATE;
105 	u64 pll_freq;
106 	u64 divider;
107 	u64 dec, dec_multiple;
108 	u32 frac;
109 	u64 multiplier;
110 
111 	pll_freq = pll->vco_current_rate;
112 
113 	divider = fref * 2;
114 
115 	multiplier = 1 << FRAC_BITS;
116 	dec_multiple = div_u64(pll_freq * multiplier, divider);
117 	dec = div_u64_rem(dec_multiple, multiplier, &frac);
118 
119 	if (pll_freq <= 1900000000UL)
120 		config->pll_prop_gain_rate = 8;
121 	else if (pll_freq <= 3000000000UL)
122 		config->pll_prop_gain_rate = 10;
123 	else
124 		config->pll_prop_gain_rate = 12;
125 	if (pll_freq < 1100000000UL)
126 		config->pll_clock_inverters = 8;
127 	else
128 		config->pll_clock_inverters = 0;
129 
130 	config->decimal_div_start = dec;
131 	config->frac_div_start = frac;
132 }
133 
134 #define SSC_CENTER		BIT(0)
135 #define SSC_EN			BIT(1)
136 
137 static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
138 {
139 	u32 ssc_per;
140 	u32 ssc_mod;
141 	u64 ssc_step_size;
142 	u64 frac;
143 
144 	if (!config->enable_ssc) {
145 		DBG("SSC not enabled\n");
146 		return;
147 	}
148 
149 	ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
150 	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
151 	ssc_per -= ssc_mod;
152 
153 	frac = config->frac_div_start;
154 	ssc_step_size = config->decimal_div_start;
155 	ssc_step_size *= (1 << FRAC_BITS);
156 	ssc_step_size += frac;
157 	ssc_step_size *= config->ssc_offset;
158 	ssc_step_size *= (config->ssc_adj_per + 1);
159 	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
160 	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
161 
162 	config->ssc_div_per = ssc_per;
163 	config->ssc_stepsize = ssc_step_size;
164 
165 	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
166 		 config->decimal_div_start, frac, FRAC_BITS);
167 	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
168 		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
169 }
170 
171 static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
172 {
173 	void __iomem *base = pll->phy->pll_base;
174 
175 	if (config->enable_ssc) {
176 		pr_debug("SSC is enabled\n");
177 
178 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
179 			  config->ssc_stepsize & 0xff);
180 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
181 			  config->ssc_stepsize >> 8);
182 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
183 			  config->ssc_div_per & 0xff);
184 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
185 			  config->ssc_div_per >> 8);
186 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
187 			  config->ssc_adj_per & 0xff);
188 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
189 			  config->ssc_adj_per >> 8);
190 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
191 			  SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
192 	}
193 }
194 
/*
 * Program the frequency-independent ("hzindep") PLL registers: analog
 * controls, loop filter/gain defaults and calibration settings that do
 * not depend on the requested VCO rate. Values are fixed magic numbers;
 * presumably from the vendor hardware programming guide — not derivable
 * from this file.
 */
static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
		  0xba);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
		  0x4c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
}
220 
/*
 * Commit the rate-dependent divider values from dsi_pll_calc_dec_frac()
 * to the PLL: the integer ("decimal") part, the 18-bit fractional part
 * split across three registers, plus lock-detect and clock-inverter
 * settings.
 */
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
		  config->decimal_div_start);
	/* fractional part: bits 7:0, 15:8 and 17:16 */
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
		  config->frac_div_start & 0xff);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
		  (config->frac_div_start & 0xff00) >> 8);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
		  (config->frac_div_start & 0x30000) >> 16);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
		  config->pll_clock_inverters);
}
240 
/*
 * clk_ops.set_rate for the VCO: compute and program all divider/SSC
 * registers for @rate. The rate is cached in vco_current_rate so that
 * vco_prepare() and pll_restore_state() can reprogram it later.
 * Always returns 0.
 */
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct dsi_pll_config config;

	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
	    parent_rate);

	pll_10nm->vco_current_rate = rate;

	dsi_pll_setup_config(&config);

	dsi_pll_calc_dec_frac(pll_10nm, &config);

	dsi_pll_calc_ssc(pll_10nm, &config);

	dsi_pll_commit(pll_10nm, &config);

	dsi_pll_config_hzindep_reg(pll_10nm);

	dsi_pll_ssc_commit(pll_10nm, &config);

	/* flush, ensure all register writes are done */
	wmb();

	return 0;
}
269 
270 static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
271 {
272 	struct device *dev = &pll->phy->pdev->dev;
273 	int rc;
274 	u32 status = 0;
275 	u32 const delay_us = 100;
276 	u32 const timeout_us = 5000;
277 
278 	rc = readl_poll_timeout_atomic(pll->phy->pll_base +
279 				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
280 				       status,
281 				       ((status & BIT(0)) > 0),
282 				       delay_us,
283 				       timeout_us);
284 	if (rc)
285 		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
286 			      pll->phy->id, status);
287 
288 	return rc;
289 }
290 
/*
 * Power down the PLL bias: gate the PLL system muxes first, then clear
 * the bias enable (bit 5 of CMN_CTRL_0). The ordering mirrors (reversed)
 * dsi_pll_enable_pll_bias().
 */
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data & ~BIT(5));
	ndelay(250);	/* settle time after bias change */
}
300 
/*
 * Power up the PLL bias (bit 5 of CMN_CTRL_0), then ungate the PLL
 * system muxes. Inverse sequence of dsi_pll_disable_pll_bias().
 */
static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data | BIT(5));
	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
	ndelay(250);	/* settle time after bias change */
}
310 
311 static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
312 {
313 	u32 data;
314 
315 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
316 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
317 		  data & ~BIT(5));
318 }
319 
320 static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
321 {
322 	u32 data;
323 
324 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
325 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
326 		  data | BIT(5));
327 }
328 
329 static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
330 {
331 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
332 	struct device *dev = &pll_10nm->phy->pdev->dev;
333 	int rc;
334 
335 	dsi_pll_enable_pll_bias(pll_10nm);
336 	if (pll_10nm->slave)
337 		dsi_pll_enable_pll_bias(pll_10nm->slave);
338 
339 	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
340 	if (rc) {
341 		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
342 		return rc;
343 	}
344 
345 	/* Start PLL */
346 	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
347 		  0x01);
348 
349 	/*
350 	 * ensure all PLL configurations are written prior to checking
351 	 * for PLL lock.
352 	 */
353 	wmb();
354 
355 	/* Check for PLL lock */
356 	rc = dsi_pll_10nm_lock_status(pll_10nm);
357 	if (rc) {
358 		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
359 		goto error;
360 	}
361 
362 	pll_10nm->phy->pll_on = true;
363 
364 	dsi_pll_enable_global_clk(pll_10nm);
365 	if (pll_10nm->slave)
366 		dsi_pll_enable_global_clk(pll_10nm->slave);
367 
368 	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
369 		  0x01);
370 	if (pll_10nm->slave)
371 		dsi_phy_write(pll_10nm->slave->phy->base +
372 			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
373 
374 error:
375 	return rc;
376 }
377 
/* Per-PLL power-down helper: gate the resync buffer, then drop the bias */
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
	dsi_pll_disable_pll_bias(pll);
}
383 
/*
 * clk_ops.unprepare for the VCO: gate the output clock, stop the PLL and
 * power everything down — master first, then the slave in bonded mode.
 */
static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	/*
	 * To avoid any stray glitches while abruptly powering down the PLL
	 * make sure to gate the clock using the clock enable bit before
	 * powering down the PLL
	 */
	dsi_pll_disable_global_clk(pll_10nm);
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
	dsi_pll_disable_sub(pll_10nm);
	if (pll_10nm->slave) {
		dsi_pll_disable_global_clk(pll_10nm->slave);
		dsi_pll_disable_sub(pll_10nm->slave);
	}
	/* flush, ensure all register writes are done */
	wmb();
	pll_10nm->phy->pll_on = false;
}
404 
405 static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
406 						  unsigned long parent_rate)
407 {
408 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
409 	void __iomem *base = pll_10nm->phy->pll_base;
410 	u64 ref_clk = VCO_REF_CLK_RATE;
411 	u64 vco_rate = 0x0;
412 	u64 multiplier;
413 	u32 frac;
414 	u32 dec;
415 	u64 pll_freq, tmp64;
416 
417 	dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
418 	dec &= 0xff;
419 
420 	frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
421 	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
422 		  0xff) << 8);
423 	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
424 		  0x3) << 16);
425 
426 	/*
427 	 * TODO:
428 	 *	1. Assumes prescaler is disabled
429 	 */
430 	multiplier = 1 << FRAC_BITS;
431 	pll_freq = dec * (ref_clk * 2);
432 	tmp64 = (ref_clk * 2 * frac);
433 	pll_freq += div_u64(tmp64, multiplier);
434 
435 	vco_rate = pll_freq;
436 	pll_10nm->vco_current_rate = vco_rate;
437 
438 	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
439 	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
440 
441 	return (unsigned long)vco_rate;
442 }
443 
444 static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
445 		unsigned long rate, unsigned long *parent_rate)
446 {
447 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
448 
449 	if      (rate < pll_10nm->phy->cfg->min_pll_rate)
450 		return  pll_10nm->phy->cfg->min_pll_rate;
451 	else if (rate > pll_10nm->phy->cfg->max_pll_rate)
452 		return  pll_10nm->phy->cfg->max_pll_rate;
453 	else
454 		return rate;
455 }
456 
/* Common-clock-framework callbacks for the DSI VCO clock */
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
	.round_rate = dsi_pll_10nm_clk_round_rate,
	.set_rate = dsi_pll_10nm_vco_set_rate,
	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
	.prepare = dsi_pll_10nm_vco_prepare,
	.unprepare = dsi_pll_10nm_vco_unprepare,
};
464 
465 /*
466  * PLL Callbacks
467  */
468 
469 static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
470 {
471 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
472 	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
473 	void __iomem *phy_base = pll_10nm->phy->base;
474 	u32 cmn_clk_cfg0, cmn_clk_cfg1;
475 
476 	cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
477 				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
478 	cached->pll_out_div &= 0x3;
479 
480 	cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
481 	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
482 	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
483 
484 	cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
485 	cached->pll_mux = cmn_clk_cfg1 & 0x3;
486 
487 	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
488 	    pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
489 	    cached->pix_clk_div, cached->pll_mux);
490 }
491 
/*
 * Reapply the divider/mux programming captured by dsi_10nm_pll_save_state()
 * and reprogram the cached VCO rate. Returns 0 on success or a negative
 * error from the rate restore.
 */
static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 val;
	int ret;

	/* restore out-div (bits 1:0), preserving the other bits */
	val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	val &= ~0x3;
	val |= cached->pll_out_div;
	dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

	/* restore bit (3:0) and pixel (7:4) clock dividers */
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
		  cached->bit_clk_div | (cached->pix_clk_div << 4));

	/* restore pclk mux select (bits 1:0) */
	val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	val &= ~0x3;
	val |= cached->pll_mux;
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

	ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
			pll_10nm->vco_current_rate,
			VCO_REF_CLK_RATE);
	if (ret) {
		DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	DBG("DSI PLL%d", pll_10nm->phy->id);

	return 0;
}
526 
527 static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
528 {
529 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
530 	void __iomem *base = phy->base;
531 	u32 data = 0x0;	/* internal PLL */
532 
533 	DBG("DSI PLL%d", pll_10nm->phy->id);
534 
535 	switch (phy->usecase) {
536 	case MSM_DSI_PHY_STANDALONE:
537 		break;
538 	case MSM_DSI_PHY_MASTER:
539 		pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
540 		break;
541 	case MSM_DSI_PHY_SLAVE:
542 		data = 0x1; /* external PLL */
543 		break;
544 	default:
545 		return -EINVAL;
546 	}
547 
548 	/* set PLL src */
549 	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
550 
551 	return 0;
552 }
553 
554 /*
555  * The post dividers and mux clocks are created using the standard divider and
556  * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
557  * state to follow the master PLL's divider/mux state. Therefore, we don't
558  * require special clock ops that also configure the slave PLL registers
559  */
/*
 * Register the VCO and the full post-divider/mux clock tree (see the
 * diagram at the top of this file), exposing the byte and pixel clocks
 * through @provided_clocks. All clocks are devm-managed, so the fail
 * path needs no explicit cleanup.
 */
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent[32], vco_name[32];
	char parent2[32], parent3[32], parent4[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_10nm_vco,
	};
	struct device *dev = &pll_10nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

	DBG("DSI%d", pll_10nm->phy->id);

	/* dsi%dvco_clk: the VCO itself, parented to the "xo" reference */
	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
	pll_10nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
	if (ret)
		return ret;

	/* dsi%d_pll_out_div_clk: power-of-two out-divider after the VCO */
	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_divider(dev, clk_name,
				     parent, CLK_SET_RATE_PARENT,
				     pll_10nm->phy->pll_base +
				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	/* BIT CLK: DIV_CTRL_3_0 */
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     CLK_SET_RATE_PARENT,
				     pll_10nm->phy->base +
				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     0, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	/* dsi%d_pll_by_2_bit_clk: fixed /2 of the bit clock */
	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 2);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	/* dsi%d_pll_post_out_div_clk: fixed /4 of the out-div clock */
	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 4);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	/* dsi%d_pclk_mux: selects the pixel-clock source (CLK_CFG1 bits 1:0) */
	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_mux(dev, clk_name,
				 ((const char *[]){
				 parent, parent2, parent3, parent4
				 }), 4, 0, pll_10nm->phy->base +
				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
				 0, 2, 0, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);

	/* PIX CLK DIV : DIV_CTRL_7_4 */
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     0, pll_10nm->phy->base +
					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     4, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;

fail:

	return ret;
}
684 
/*
 * PHY pll_init callback: allocate the per-PHY PLL instance, register the
 * clock tree, and publish the VCO clk_hw on the PHY. Also records the
 * instance in pll_10nm_list so a bonded-DSI master can find its slave.
 */
static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_10nm *pll_10nm;
	int ret;

	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
	if (!pll_10nm)
		return -ENOMEM;

	DBG("DSI PLL%d", phy->id);

	pll_10nm_list[phy->id] = pll_10nm;

	spin_lock_init(&pll_10nm->postdiv_lock);

	pll_10nm->phy = phy;

	ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_10nm->clk_hw;

	/* TODO: Remove this when we have proper display handover support */
	msm_dsi_phy_pll_save_state(phy);

	return 0;
}
716 
717 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
718 {
719 	void __iomem *base = phy->base;
720 	u32 data = 0;
721 
722 	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
723 	mb(); /* make sure read happened */
724 
725 	return (data & BIT(0));
726 }
727 
728 static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
729 {
730 	void __iomem *lane_base = phy->lane_base;
731 	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
732 
733 	/*
734 	 * LPRX and CDRX need to enabled only for physical data lane
735 	 * corresponding to the logical data lane 0
736 	 */
737 	if (enable)
738 		dsi_phy_write(lane_base +
739 			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
740 	else
741 		dsi_phy_write(lane_base +
742 			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
743 }
744 
/*
 * Program per-lane settings for all 5 lanes (4 data + clock): drive
 * strengths, LPRX/CDRX gating, pin swap and CFG registers. The quirky
 * tx_dctrl[3] value and the final toggle differ for the old-timings
 * (v3.0.0) variant.
 */
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;

	if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
			      0x55);
		/*
		 * Disable LPRX and CDRX for all lanes. And later on, it will
		 * be only enabled for the physical data lane corresponding
		 * to the logical data lane 0
		 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
			      0x88);
	}

	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
			      i == 4 ? 0x80 : 0x0);	/* lane 4 presumably the clock lane — confirm */
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
			      tx_dctrl[i]);
	}

	if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/0 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
	}
}
792 
/*
 * PHY enable callback: compute D-PHY timings for the requested clock,
 * wait for the reference generator, bring the analog/digital blocks out
 * of power-down, program timings and lane settings, and select the PLL
 * usecase. Returns 0 on success or a negative error.
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	/* warn only — enabling proceeds regardless */
	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings, one register per timing parameter */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = dsi_10nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}
898 
/*
 * PHY disable callback: turn off LPRX/CDRX, power down all lanes, then
 * shut off every PHY block. Warns (but proceeds) if the PLL is still on.
 */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* disable all lanes */
	data &= ~0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();

	DBG("DSI%d PHY disabled", phy->id);
}
924 
/* 10nm PHY configuration (two PHY instances at the given io_start addrs) */
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
945 
/*
 * 8998 variant: same callbacks and limits, different io_start addresses,
 * plus the v3.0.0 old-timings quirk (see DSI_PHY_10NM_QUIRK_OLD_TIMINGS).
 */
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};
967