1 /*
2  * SPDX-License-Identifier: GPL-2.0
3  * Copyright (c) 2018, The Linux Foundation
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/iopoll.h>
9 
10 #include "dsi_phy.h"
11 #include "dsi.xml.h"
12 
13 /*
14  * DSI PLL 10nm - clock diagram (eg: DSI0):
15  *
16  *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
17  *                              |                |
18  *                              |                |
19  *                 +---------+  |  +----------+  |  +----+
20  *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
21  *                 +---------+  |  +----------+  |  +----+
22  *                              |                |
23  *                              |                |         dsi0_pll_by_2_bit_clk
24  *                              |                |          |
25  *                              |                |  +----+  |  |\  dsi0_pclk_mux
26  *                              |                |--| /2 |--o--| \   |
27  *                              |                |  +----+     |  \  |  +---------+
28  *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
29  *                              |------------------------------|  /     +---------+
30  *                              |          +-----+             | /
31  *                              -----------| /4? |--o----------|/
32  *                                         +-----+  |           |
33  *                                                  |           |dsiclk_sel
34  *                                                  |
35  *                                                  dsi0_pll_post_out_div_clk
36  */
37 
38 #define VCO_REF_CLK_RATE		19200000
39 #define FRAC_BITS 18
40 
41 /* v3.0.0 10nm implementation that requires the old timings settings */
42 #define DSI_PHY_10NM_QUIRK_OLD_TIMINGS	BIT(0)
43 
/*
 * Parameters exchanged between the calc helpers and the commit helpers.
 * The "in" fields are filled by dsi_pll_setup_config(), the "out" fields
 * by dsi_pll_calc_dec_frac() / dsi_pll_calc_ssc().
 */
struct dsi_pll_config {
	bool enable_ssc;	/* spread-spectrum clocking on/off */
	bool ssc_center;	/* selects the SSC_CENTER mode bit */
	u32 ssc_freq;		/* SSC modulation frequency, Hz */
	u32 ssc_offset;		/* spread amplitude (divided by 10^6 in calc_ssc) */
	u32 ssc_adj_per;	/* SSC adjust period */

	/* out */
	u32 pll_prop_gain_rate;	/* band-dependent loop gain (not written to HW here) */
	u32 decimal_div_start;	/* integer part of the feedback divider */
	u32 frac_div_start;	/* FRAC_BITS-wide fractional part of the divider */
	u32 pll_clock_inverters;	/* CLOCK_INVERTERS register value */
	u32 ssc_stepsize;	/* SSC step size (16 bits, split low/high) */
	u32 ssc_div_per;	/* SSC divider period (16 bits, split low/high) */
};
59 
/*
 * Divider/mux state captured by dsi_10nm_pll_save_state() and written
 * back by dsi_10nm_pll_restore_state().
 */
struct pll_10nm_cached_state {
	/* NOTE(review): never written in this file; restore replays
	 * pll->vco_current_rate instead — confirm whether this is stale.
	 */
	unsigned long vco_rate;
	u8 bit_clk_div;		/* CMN_CLK_CFG0[3:0] */
	u8 pix_clk_div;		/* CMN_CLK_CFG0[7:4] */
	u8 pll_out_div;		/* PLL_OUTDIV_RATE[1:0] */
	u8 pll_mux;		/* CMN_CLK_CFG1[1:0] */
};
67 
/* Per-PHY PLL state; embeds the VCO clk_hw handed to the clock framework. */
struct dsi_pll_10nm {
	struct clk_hw clk_hw;	/* the "dsi%dvco_clk" clock */

	struct msm_dsi_phy *phy;	/* owning PHY (register bases, cfg, id) */

	u64 vco_current_rate;	/* last rate passed to vco_set_rate() */

	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
	spinlock_t postdiv_lock;

	struct pll_10nm_cached_state cached_state;

	struct dsi_pll_10nm *slave;	/* set on the master in bonded-DSI mode */
};
82 
83 #define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, clk_hw)
84 
85 /*
86  * Global list of private DSI PLL struct pointers. We need this for Dual DSI
87  * mode, where the master PLL's clk_ops needs access the slave's private data
88  */
89 static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
90 
91 static void dsi_pll_setup_config(struct dsi_pll_config *config)
92 {
93 	config->ssc_freq = 31500;
94 	config->ssc_offset = 5000;
95 	config->ssc_adj_per = 2;
96 
97 	config->enable_ssc = false;
98 	config->ssc_center = false;
99 }
100 
/*
 * Split the feedback divider ratio vco_rate / (2 * ref_clk) into an
 * integer part (decimal_div_start) and an 18-bit fractional part
 * (frac_div_start), and pick per-band loop-gain/clock-inverter values.
 */
static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u64 fref = VCO_REF_CLK_RATE;
	u64 pll_freq;
	u64 divider;
	u64 dec, dec_multiple;
	u32 frac;
	u64 multiplier;

	pll_freq = pll->vco_current_rate;

	/* the PLL compares against twice the 19.2 MHz reference */
	divider = fref * 2;

	/* fixed-point ratio with FRAC_BITS fractional bits */
	multiplier = 1 << FRAC_BITS;
	dec_multiple = div_u64(pll_freq * multiplier, divider);
	dec = div_u64_rem(dec_multiple, multiplier, &frac);

	/* NOTE(review): pll_prop_gain_rate is computed per band but
	 * dsi_pll_config_hzindep_reg() writes a fixed 0x08 to
	 * PLL_PROP_GAIN_RATE_1, so this value is currently unused.
	 */
	if (pll_freq <= 1900000000UL)
		config->pll_prop_gain_rate = 8;
	else if (pll_freq <= 3000000000UL)
		config->pll_prop_gain_rate = 10;
	else
		config->pll_prop_gain_rate = 12;
	if (pll_freq < 1100000000UL)
		config->pll_clock_inverters = 8;
	else
		config->pll_clock_inverters = 0;

	config->decimal_div_start = dec;
	config->frac_div_start = frac;
}
132 
133 #define SSC_CENTER		BIT(0)
134 #define SSC_EN			BIT(1)
135 
/*
 * Derive the SSC step size and divider period from the reference clock
 * and the dec/frac values produced by dsi_pll_calc_dec_frac().
 * No-op when SSC is disabled.
 */
static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u32 ssc_per;
	u32 ssc_mod;
	u64 ssc_step_size;
	u64 frac;

	if (!config->enable_ssc) {
		DBG("SSC not enabled\n");
		return;
	}

	/* half the ref-clock cycles per modulation period, zero-based */
	ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
	/* trim ssc_per so (ssc_per + 1) is a multiple of (ssc_adj_per + 1) */
	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
	ssc_per -= ssc_mod;

	/*
	 * step = (dec.frac in FRAC_BITS fixed point) * ssc_offset
	 *	  * (ssc_adj_per + 1) / (ssc_per + 1) / 10^6
	 * (the final /10^6 suggests ssc_offset is in ppm)
	 */
	frac = config->frac_div_start;
	ssc_step_size = config->decimal_div_start;
	ssc_step_size *= (1 << FRAC_BITS);
	ssc_step_size += frac;
	ssc_step_size *= config->ssc_offset;
	ssc_step_size *= (config->ssc_adj_per + 1);
	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

	config->ssc_div_per = ssc_per;
	config->ssc_stepsize = ssc_step_size;

	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
		 config->decimal_div_start, frac, FRAC_BITS);
	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}
169 
170 static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
171 {
172 	void __iomem *base = pll->phy->pll_base;
173 
174 	if (config->enable_ssc) {
175 		pr_debug("SSC is enabled\n");
176 
177 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
178 			  config->ssc_stepsize & 0xff);
179 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
180 			  config->ssc_stepsize >> 8);
181 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
182 			  config->ssc_div_per & 0xff);
183 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
184 			  config->ssc_div_per >> 8);
185 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
186 			  config->ssc_adj_per & 0xff);
187 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
188 			  config->ssc_adj_per >> 8);
189 		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
190 			  SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
191 	}
192 }
193 
194 static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
195 {
196 	void __iomem *base = pll->phy->pll_base;
197 
198 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
199 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
200 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
201 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
202 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
203 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
204 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
205 		  0xba);
206 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
207 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
208 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
209 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
210 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
211 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
212 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
213 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
214 		  0x4c);
215 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
216 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
217 	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
218 }
219 
/*
 * Program the rate-dependent divider values computed by
 * dsi_pll_calc_dec_frac() into the PLL register block.
 */
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
		  config->decimal_div_start);
	/* 18-bit fractional divider spread over three byte-wide registers */
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
		  config->frac_div_start & 0xff);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
		  (config->frac_div_start & 0xff00) >> 8);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
		  (config->frac_div_start & 0x30000) >> 16);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
		  config->pll_clock_inverters);
}
239 
240 static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
241 				     unsigned long parent_rate)
242 {
243 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
244 	struct dsi_pll_config config;
245 
246 	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
247 	    parent_rate);
248 
249 	pll_10nm->vco_current_rate = rate;
250 
251 	dsi_pll_setup_config(&config);
252 
253 	dsi_pll_calc_dec_frac(pll_10nm, &config);
254 
255 	dsi_pll_calc_ssc(pll_10nm, &config);
256 
257 	dsi_pll_commit(pll_10nm, &config);
258 
259 	dsi_pll_config_hzindep_reg(pll_10nm);
260 
261 	dsi_pll_ssc_commit(pll_10nm, &config);
262 
263 	/* flush, ensure all register writes are done*/
264 	wmb();
265 
266 	return 0;
267 }
268 
269 static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
270 {
271 	struct device *dev = &pll->phy->pdev->dev;
272 	int rc;
273 	u32 status = 0;
274 	u32 const delay_us = 100;
275 	u32 const timeout_us = 5000;
276 
277 	rc = readl_poll_timeout_atomic(pll->phy->pll_base +
278 				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
279 				       status,
280 				       ((status & BIT(0)) > 0),
281 				       delay_us,
282 				       timeout_us);
283 	if (rc)
284 		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
285 			      pll->phy->id, status);
286 
287 	return rc;
288 }
289 
/*
 * Power down the PLL bias: clear SYSTEM_MUXES first, then the bias
 * enable bit (BIT 5 of CMN_CTRL_0 — set by dsi_pll_enable_pll_bias()).
 */
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data & ~BIT(5));
	ndelay(250);	/* settle time after dropping the bias */
}
299 
/*
 * Power up the PLL bias: set BIT 5 of CMN_CTRL_0, then write 0xc0 to
 * SYSTEM_MUXES. Mirror image of dsi_pll_disable_pll_bias().
 */
static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data | BIT(5));
	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
	ndelay(250);	/* settle time after enabling the bias */
}
309 
310 static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
311 {
312 	u32 data;
313 
314 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
315 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
316 		  data & ~BIT(5));
317 }
318 
319 static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
320 {
321 	u32 data;
322 
323 	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
324 	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
325 		  data | BIT(5));
326 }
327 
328 static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
329 {
330 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
331 	struct device *dev = &pll_10nm->phy->pdev->dev;
332 	int rc;
333 
334 	dsi_pll_enable_pll_bias(pll_10nm);
335 	if (pll_10nm->slave)
336 		dsi_pll_enable_pll_bias(pll_10nm->slave);
337 
338 	rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0);
339 	if (rc) {
340 		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
341 		return rc;
342 	}
343 
344 	/* Start PLL */
345 	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
346 		  0x01);
347 
348 	/*
349 	 * ensure all PLL configurations are written prior to checking
350 	 * for PLL lock.
351 	 */
352 	wmb();
353 
354 	/* Check for PLL lock */
355 	rc = dsi_pll_10nm_lock_status(pll_10nm);
356 	if (rc) {
357 		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
358 		goto error;
359 	}
360 
361 	pll_10nm->phy->pll_on = true;
362 
363 	dsi_pll_enable_global_clk(pll_10nm);
364 	if (pll_10nm->slave)
365 		dsi_pll_enable_global_clk(pll_10nm->slave);
366 
367 	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
368 		  0x01);
369 	if (pll_10nm->slave)
370 		dsi_phy_write(pll_10nm->slave->phy->base +
371 			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
372 
373 error:
374 	return rc;
375 }
376 
/* Per-PHY power-down helper: turn off the resync buffer, then the PLL bias. */
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
	dsi_pll_disable_pll_bias(pll);
}
382 
/*
 * clk_ops.unprepare for the VCO: gate the clock, stop the PLL and power
 * it down — first on this PHY, then on the slave in bonded-DSI mode.
 */
static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	/*
	 * To avoid any stray glitches while abruptly powering down the PLL
	 * make sure to gate the clock using the clock enable bit before
	 * powering down the PLL
	 */
	dsi_pll_disable_global_clk(pll_10nm);
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
	dsi_pll_disable_sub(pll_10nm);
	if (pll_10nm->slave) {
		dsi_pll_disable_global_clk(pll_10nm->slave);
		dsi_pll_disable_sub(pll_10nm->slave);
	}
	/* flush, ensure all register writes are done */
	wmb();
	pll_10nm->phy->pll_on = false;
}
403 
/*
 * clk_ops.recalc_rate for the VCO: read the dec/frac divider back from
 * hardware and reconstruct vco = (2 * ref_clk) * (dec + frac / 2^18) —
 * the inverse of dsi_pll_calc_dec_frac().
 */
static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	void __iomem *base = pll_10nm->phy->pll_base;
	u64 ref_clk = VCO_REF_CLK_RATE;
	u64 vco_rate = 0x0;
	u64 multiplier;
	u32 frac;
	u32 dec;
	u64 pll_freq, tmp64;

	dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
	dec &= 0xff;

	/* reassemble the 18-bit fraction from its three byte registers */
	frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
		  0xff) << 8);
	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
		  0x3) << 16);

	/*
	 * TODO:
	 *	1. Assumes prescaler is disabled
	 */
	multiplier = 1 << FRAC_BITS;
	pll_freq = dec * (ref_clk * 2);
	tmp64 = (ref_clk * 2 * frac);
	pll_freq += div_u64(tmp64, multiplier);

	vco_rate = pll_freq;

	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);

	return (unsigned long)vco_rate;
}
441 
442 static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
443 		unsigned long rate, unsigned long *parent_rate)
444 {
445 	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
446 
447 	if      (rate < pll_10nm->phy->cfg->min_pll_rate)
448 		return  pll_10nm->phy->cfg->min_pll_rate;
449 	else if (rate > pll_10nm->phy->cfg->max_pll_rate)
450 		return  pll_10nm->phy->cfg->max_pll_rate;
451 	else
452 		return rate;
453 }
454 
/* Common-clock-framework ops for the dsi%dvco_clk clock. */
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
	.round_rate = dsi_pll_10nm_clk_round_rate,
	.set_rate = dsi_pll_10nm_vco_set_rate,
	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
	.prepare = dsi_pll_10nm_vco_prepare,
	.unprepare = dsi_pll_10nm_vco_unprepare,
};
462 
463 /*
464  * PLL Callbacks
465  */
466 
/*
 * Snapshot the PLL out-div, bit/pixel clock dividers and pll mux so
 * dsi_10nm_pll_restore_state() can write them back later.
 */
static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 cmn_clk_cfg0, cmn_clk_cfg1;

	cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	cached->pll_out_div &= 0x3;

	/* CLK_CFG0 packs bit clk div [3:0] and pix clk div [7:4] */
	cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

	cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	cached->pll_mux = cmn_clk_cfg1 & 0x3;

	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
	    pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
	    cached->pix_clk_div, cached->pll_mux);
}
489 
/*
 * Reapply the divider/mux state captured by dsi_10nm_pll_save_state()
 * and reprogram the VCO at the last requested rate (vco_current_rate).
 * Returns 0 on success or the error from vco_set_rate.
 */
static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 val;
	int ret;

	/* restore the 2-bit out-div, preserving the other bits */
	val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	val &= ~0x3;
	val |= cached->pll_out_div;
	dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
		  cached->bit_clk_div | (cached->pix_clk_div << 4));

	/* restore the 2-bit pll mux, preserving the other bits */
	val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	val &= ~0x3;
	val |= cached->pll_mux;
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

	ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
			pll_10nm->vco_current_rate,
			VCO_REF_CLK_RATE);
	if (ret) {
		DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	DBG("DSI PLL%d", pll_10nm->phy->id);

	return 0;
}
524 
/*
 * Configure the PLL source for this PHY's use case: internal PLL for
 * standalone/master operation, the master's ("external") PLL for a
 * slave in bonded-DSI mode. Returns -EINVAL for unknown use cases.
 */
static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	void __iomem *base = phy->base;
	u32 data = 0x0;	/* internal PLL */

	DBG("DSI PLL%d", pll_10nm->phy->id);

	switch (phy->usecase) {
	case MSM_DSI_PHY_STANDALONE:
		break;
	case MSM_DSI_PHY_MASTER:
		/* remember the other PHY so its bias/clocks follow ours */
		pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
		break;
	case MSM_DSI_PHY_SLAVE:
		data = 0x1; /* external PLL */
		break;
	default:
		return -EINVAL;
	}

	/* set PLL src */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));

	return 0;
}
551 
552 /*
553  * The post dividers and mux clocks are created using the standard divider and
554  * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
555  * state to follow the master PLL's divider/mux state. Therefore, we don't
556  * require special clock ops that also configure the slave PLL registers
557  */
/*
 * Register the VCO and the post-divider/mux clock tree shown in the
 * diagram at the top of this file, and publish DSI_BYTE_PLL_CLK and
 * DSI_PIXEL_PLL_CLK through @provided_clocks. All clocks are
 * devm-managed, so the failure path has nothing to unwind.
 */
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent[32], vco_name[32];
	char parent2[32], parent3[32], parent4[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_10nm_vco,
	};
	struct device *dev = &pll_10nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

	DBG("DSI%d", pll_10nm->phy->id);

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
	pll_10nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
	if (ret)
		return ret;

	/* OUT_DIV: power-of-two divider in PLL_OUTDIV_RATE[1:0] */
	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_divider(dev, clk_name,
				     parent, CLK_SET_RATE_PARENT,
				     pll_10nm->phy->pll_base +
				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	/* BIT CLK: DIV_CTRL_3_0 */
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     CLK_SET_RATE_PARENT,
				     pll_10nm->phy->base +
				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     0, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 2);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 4);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	/* pclk mux: selects one of the four parents via CLK_CFG1[1:0] */
	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_mux(dev, clk_name,
				 ((const char *[]){
				 parent, parent2, parent3, parent4
				 }), 4, 0, pll_10nm->phy->base +
				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
				 0, 2, 0, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);

	/* PIX CLK DIV : DIV_CTRL_7_4*/
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     0, pll_10nm->phy->base +
					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     4, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;

fail:

	return ret;
}
682 
/*
 * PHY pll_init hook: allocate the per-PHY PLL state, register the clock
 * tree and expose the VCO clk_hw to the PHY core. The pll_10nm_list
 * entry lets a bonded-DSI master find this PLL later.
 */
static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_10nm *pll_10nm;
	int ret;

	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
	if (!pll_10nm)
		return -ENOMEM;

	DBG("DSI PLL%d", phy->id);

	pll_10nm_list[phy->id] = pll_10nm;

	spin_lock_init(&pll_10nm->postdiv_lock);

	pll_10nm->phy = phy;

	ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_10nm->clk_hw;

	/* TODO: Remove this when we have proper display handover support */
	msm_dsi_phy_pll_save_state(phy);

	return 0;
}
714 
715 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
716 {
717 	void __iomem *base = phy->base;
718 	u32 data = 0;
719 
720 	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
721 	mb(); /* make sure read happened */
722 
723 	return (data & BIT(0));
724 }
725 
726 static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
727 {
728 	void __iomem *lane_base = phy->lane_base;
729 	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
730 
731 	/*
732 	 * LPRX and CDRX need to enabled only for physical data lane
733 	 * corresponding to the logical data lane 0
734 	 */
735 	if (enable)
736 		dsi_phy_write(lane_base +
737 			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
738 	else
739 		dsi_phy_write(lane_base +
740 			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
741 }
742 
/*
 * Program the per-lane strength and configuration registers for all
 * five lanes (lane 4 gets a different CFG3 value). The TX_DCTRL values
 * differ on hardware with the old-timings quirk.
 */
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;

	if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
			      0x55);
		/*
		 * Disable LPRX and CDRX for all lanes. And later on, it will
		 * be only enabled for the physical data lane corresponding
		 * to the logical data lane 0
		 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
			      0x88);
	}

	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
			      i == 4 ? 0x80 : 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
			      tx_dctrl[i]);
	}

	if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/O */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
	}
}
790 
/*
 * Power up and configure the PHY for @clk_req: compute D-PHY timings,
 * wait for REFGEN, bring the analog blocks out of power-down, program
 * the timing and lane registers, then select the PLL use case.
 * The register write order follows the hardware bring-up sequence.
 */
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = dsi_10nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}
896 
/*
 * Power down the PHY: disable LPRX/CDRX, turn off all lanes and then
 * all PHY blocks. Warns if the PLL is still running.
 */
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* disable all lanes */
	data &= ~0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();

	DBG("DSI%d PHY disabled", phy->id);
}
922 
/* 10nm PHY configuration: two PHYs at 0xae94400/0xae96400. */
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};
943 
/*
 * 10nm PHY configuration variant (PHYs at 0xc994400/0xc996400) using
 * the v3.0.0 old-timings quirk (see DSI_PHY_10NM_QUIRK_OLD_TIMINGS).
 */
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};
965