/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>

#include "dsi_phy.h"
#include "dsi.xml.h"

/*
 * DSI PLL 10nm - clock diagram (e.g. DSI0):
 *
 *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
 *                              |                |
 *                              |                |
 *                 +---------+  |  +----------+  |  +----+
 *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
 *                 +---------+  |  +----------+  |  +----+
 *                              |                |
 *                              |                |         dsi0_pll_by_2_bit_clk
 *                              |                |          |
 *                              |                |  +----+  |  |\  dsi0_pclk_mux
 *                              |                |--| /2 |--o--| \   |
 *                              |                |  +----+     |  \  |  +---------+
 *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
 *                              |------------------------------|  /     +---------+
 *                              |          +-----+             | /
 *                              -----------| /4? |--o----------|/
 *                                         +-----+  |           |
 *                                                  |           |dsiclk_sel
 *                                                  |
 *                                                  dsi0_pll_post_out_div_clk
 */

#define VCO_REF_CLK_RATE		19200000
#define FRAC_BITS 18

/* v3.0.0 10nm implementation that requires the old timing settings */
#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS	BIT(0)

struct dsi_pll_config {
	bool enable_ssc;
	bool ssc_center;
	u32 ssc_freq;
	u32 ssc_offset;
	u32 ssc_adj_per;

	/* out */
	u32 pll_prop_gain_rate;
	u32 decimal_div_start;
	u32 frac_div_start;
	u32 pll_clock_inverters;
	u32 ssc_stepsize;
	u32 ssc_div_per;
};

struct pll_10nm_cached_state {
	unsigned long vco_rate;
	u8 bit_clk_div;
	u8 pix_clk_div;
	u8 pll_out_div;
	u8 pll_mux;
};

struct dsi_pll_10nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	u64 vco_current_rate;

	/* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
	spinlock_t postdiv_lock;

	struct pll_10nm_cached_state cached_state;

	struct dsi_pll_10nm *slave;
};

#define to_pll_10nm(x)	container_of(x, struct dsi_pll_10nm, clk_hw)

/*
 * Global list of private DSI PLL struct pointers. We need this for dual-DSI
 * mode, where the master PLL's clk_ops needs to access the slave PLL's
 * private data.
 */
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];

static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
	config->ssc_freq = 31500;
	config->ssc_offset = 5000;
	config->ssc_adj_per = 2;

	config->enable_ssc = false;
	config->ssc_center = false;
}

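/*
 * Split the requested VCO rate into the integer ("decimal") and 18-bit
 * fractional parts of the PLL feedback divider, relative to twice the
 * 19.2 MHz reference. For example, for a 1.5 GHz VCO request:
 *
 *   1500000000 / (2 * 19200000) = 39.0625
 *   -> decimal_div_start = 39, frac_div_start = 0.0625 * 2^18 = 16384
 *
 * The proportional gain and clock-inverter settings are then chosen from
 * coarse frequency bands.
 */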
static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u64 fref = VCO_REF_CLK_RATE;
	u64 pll_freq;
	u64 divider;
	u64 dec, dec_multiple;
	u32 frac;
	u64 multiplier;

	pll_freq = pll->vco_current_rate;

	divider = fref * 2;

	multiplier = 1 << FRAC_BITS;
	dec_multiple = div_u64(pll_freq * multiplier, divider);
	dec = div_u64_rem(dec_multiple, multiplier, &frac);

	if (pll_freq <= 1900000000UL)
		config->pll_prop_gain_rate = 8;
	else if (pll_freq <= 3000000000UL)
		config->pll_prop_gain_rate = 10;
	else
		config->pll_prop_gain_rate = 12;
	if (pll_freq < 1100000000UL)
		config->pll_clock_inverters = 8;
	else
		config->pll_clock_inverters = 0;

	config->decimal_div_start = dec;
	config->frac_div_start = frac;
}

#define SSC_CENTER		BIT(0)
#define SSC_EN			BIT(1)

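/*
 * Derive the spread-spectrum (SSC) register values from the defaults chosen
 * in dsi_pll_setup_config(). ssc_div_per is derived from the ratio of the
 * 19.2 MHz reference to the requested modulation frequency, and ssc_stepsize
 * scales the programmed divider value (dec.frac in 2^18 units) by
 * ssc_offset, which is expressed in ppm (hence the final division by
 * 1000000).
 */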
static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	u32 ssc_per;
	u32 ssc_mod;
	u64 ssc_step_size;
	u64 frac;

	if (!config->enable_ssc) {
		DBG("SSC not enabled\n");
		return;
	}

	ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
	ssc_per -= ssc_mod;

	frac = config->frac_div_start;
	ssc_step_size = config->decimal_div_start;
	ssc_step_size *= (1 << FRAC_BITS);
	ssc_step_size += frac;
	ssc_step_size *= config->ssc_offset;
	ssc_step_size *= (config->ssc_adj_per + 1);
	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

	config->ssc_div_per = ssc_per;
	config->ssc_stepsize = ssc_step_size;

	pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
		 config->decimal_div_start, frac, FRAC_BITS);
	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}

static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	if (config->enable_ssc) {
		pr_debug("SSC is enabled\n");

		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
			  config->ssc_stepsize & 0xff);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
			  config->ssc_stepsize >> 8);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
			  config->ssc_div_per & 0xff);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
			  config->ssc_div_per >> 8);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
			  config->ssc_adj_per & 0xff);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
			  config->ssc_adj_per >> 8);
		dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
			  SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
	}
}

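/*
 * Rate-independent PLL defaults (analog controls, loop filter, calibration
 * and lock-detect settings). These are programmed with fixed values on every
 * set_rate, in contrast to the dividers written by dsi_pll_commit().
 */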
static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
		  0xba);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
		  0x4c);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
}

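/*
 * Program the rate-dependent values computed by dsi_pll_calc_dec_frac():
 * the integer divider, the 18-bit fractional divider split across three
 * byte-wide registers, and the clock-inverter selection.
 */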
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
	void __iomem *base = pll->phy->pll_base;

	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
		  config->decimal_div_start);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
		  config->frac_div_start & 0xff);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
		  (config->frac_div_start & 0xff00) >> 8);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
		  (config->frac_div_start & 0x30000) >> 16);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
	dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
		  config->pll_clock_inverters);
}

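/*
 * clk_ops .set_rate: recompute the divider configuration for the requested
 * VCO rate and write it out (dividers, rate-independent defaults, then SSC),
 * finishing with a write barrier so everything lands before the PLL is
 * started or lock is polled.
 */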
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct dsi_pll_config config;

	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
	    parent_rate);

	pll_10nm->vco_current_rate = rate;

	dsi_pll_setup_config(&config);

	dsi_pll_calc_dec_frac(pll_10nm, &config);

	dsi_pll_calc_ssc(pll_10nm, &config);

	dsi_pll_commit(pll_10nm, &config);

	dsi_pll_config_hzindep_reg(pll_10nm);

	dsi_pll_ssc_commit(pll_10nm, &config);

	/* flush, ensure all register writes are done */
	wmb();

	return 0;
}

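/*
 * Poll bit 0 of COMMON_STATUS_ONE for PLL lock, checking every 100 us for
 * up to 5 ms.
 */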
static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
	struct device *dev = &pll->phy->pdev->dev;
	int rc;
	u32 status = 0;
	u32 const delay_us = 100;
	u32 const timeout_us = 5000;

	rc = readl_poll_timeout_atomic(pll->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
				       status,
				       ((status & BIT(0)) > 0),
				       delay_us,
				       timeout_us);
	if (rc)
		DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
			      pll->phy->id, status);

	return rc;
}

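/*
 * Helpers for the PLL analog bias. BIT(5) of CMN_CTRL_0 is the PLL power
 * bit (cf. the "de-assert digital and pll power down" step in
 * dsi_10nm_phy_enable()); SYSTEM_MUXES is toggled alongside it, and the
 * 250 ns delay lets the bias settle.
 */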
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data & ~BIT(5));
	ndelay(250);
}

static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
	u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
		  data | BIT(5));
	dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
	ndelay(250);
}

static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
{
	u32 data;

	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
		  data & ~BIT(5));
}

static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
{
	u32 data;

	data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
		  data | BIT(5));
}

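/*
 * clk_ops .prepare: power-up sequence for the VCO. Enable the PLL bias (on
 * the slave PHY too in dual-DSI mode), reprogram the last requested rate,
 * start the PLL, wait for lock, then ungate the global clocks and enable the
 * resync FIFO buffers.
 */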
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	struct device *dev = &pll_10nm->phy->pdev->dev;
	int rc;

	dsi_pll_enable_pll_bias(pll_10nm);
	if (pll_10nm->slave)
		dsi_pll_enable_pll_bias(pll_10nm->slave);

	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
	if (rc) {
		DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
		return rc;
	}

	/* Start PLL */
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
		  0x01);

	/*
	 * ensure all PLL configurations are written prior to checking
	 * for PLL lock.
	 */
	wmb();

	/* Check for PLL lock */
	rc = dsi_pll_10nm_lock_status(pll_10nm);
	if (rc) {
		DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
		goto error;
	}

	pll_10nm->phy->pll_on = true;

	dsi_pll_enable_global_clk(pll_10nm);
	if (pll_10nm->slave)
		dsi_pll_enable_global_clk(pll_10nm->slave);

	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
		  0x01);
	if (pll_10nm->slave)
		dsi_phy_write(pll_10nm->slave->phy->base +
			  REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);

error:
	return rc;
}

static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
	dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
	dsi_pll_disable_pll_bias(pll);
}

static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	/*
	 * To avoid any stray glitches while abruptly powering down the PLL,
	 * make sure to gate the clock using the clock enable bit before
	 * powering down the PLL.
	 */
	dsi_pll_disable_global_clk(pll_10nm);
	dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
	dsi_pll_disable_sub(pll_10nm);
	if (pll_10nm->slave) {
		dsi_pll_disable_global_clk(pll_10nm->slave);
		dsi_pll_disable_sub(pll_10nm->slave);
	}
	/* flush, ensure all register writes are done */
	wmb();
	pll_10nm->phy->pll_on = false;
}

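/*
 * clk_ops .recalc_rate: the inverse of dsi_pll_calc_dec_frac(). Read back
 * the decimal and 18-bit fractional dividers and reconstruct
 * vco = (dec + frac / 2^18) * 2 * 19.2 MHz.
 */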
static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
	void __iomem *base = pll_10nm->phy->pll_base;
	u64 ref_clk = VCO_REF_CLK_RATE;
	u64 vco_rate = 0x0;
	u64 multiplier;
	u32 frac;
	u32 dec;
	u64 pll_freq, tmp64;

	dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
	dec &= 0xff;

	frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
		  0xff) << 8);
	frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
		  0x3) << 16);

	/*
	 * TODO:
	 *	1. Assumes prescaler is disabled
	 */
	multiplier = 1 << FRAC_BITS;
	pll_freq = dec * (ref_clk * 2);
	tmp64 = (ref_clk * 2 * frac);
	pll_freq += div_u64(tmp64, multiplier);

	vco_rate = pll_freq;
	pll_10nm->vco_current_rate = vco_rate;

	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);

	return (unsigned long)vco_rate;
}

static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

	if      (rate < pll_10nm->phy->cfg->min_pll_rate)
		return  pll_10nm->phy->cfg->min_pll_rate;
	else if (rate > pll_10nm->phy->cfg->max_pll_rate)
		return  pll_10nm->phy->cfg->max_pll_rate;
	else
		return rate;
}

static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
	.round_rate = dsi_pll_10nm_clk_round_rate,
	.set_rate = dsi_pll_10nm_vco_set_rate,
	.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
	.prepare = dsi_pll_10nm_vco_prepare,
	.unprepare = dsi_pll_10nm_vco_unprepare,
};

/*
 * PLL Callbacks
 */

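/*
 * Cache the PLL output divider, the bit/pixel clock dividers and the pclk
 * mux setting so they can be reprogrammed after the PHY loses state (see
 * dsi_10nm_pll_restore_state()).
 */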
static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 cmn_clk_cfg0, cmn_clk_cfg1;

	cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
				       REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	cached->pll_out_div &= 0x3;

	cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
	cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
	cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

	cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	cached->pll_mux = cmn_clk_cfg1 & 0x3;

	DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
	    pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
	    cached->pix_clk_div, cached->pll_mux);
}

static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
	void __iomem *phy_base = pll_10nm->phy->base;
	u32 val;
	int ret;

	val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
	val &= ~0x3;
	val |= cached->pll_out_div;
	dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
		  cached->bit_clk_div | (cached->pix_clk_div << 4));

	val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
	val &= ~0x3;
	val |= cached->pll_mux;
	dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

	ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
			pll_10nm->vco_current_rate,
			VCO_REF_CLK_RATE);
	if (ret) {
		DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	DBG("DSI PLL%d", pll_10nm->phy->id);

	return 0;
}

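/*
 * Select the byte/pixel clock source in CMN_CLK_CFG1: 0 uses this PHY's own
 * PLL, 1 takes the clocks from the other (master) PHY in dual-DSI mode. The
 * master additionally records a pointer to the slave so its clk_ops can
 * drive both PLLs.
 */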
static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
{
	struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
	void __iomem *base = phy->base;
	u32 data = 0x0;	/* internal PLL */

	DBG("DSI PLL%d", pll_10nm->phy->id);

	switch (phy->usecase) {
	case MSM_DSI_PHY_STANDALONE:
		break;
	case MSM_DSI_PHY_MASTER:
		pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
		break;
	case MSM_DSI_PHY_SLAVE:
		data = 0x1; /* external PLL */
		break;
	default:
		return -EINVAL;
	}

	/* set PLL src */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));

	return 0;
}

/*
 * The post dividers and mux clocks are created using the standard divider and
 * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
 * state to follow the master PLL's divider/mux state. Therefore, we don't
 * require special clock ops that also configure the slave PLL registers.
 */
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent[32], vco_name[32];
	char parent2[32], parent3[32], parent4[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_10nm_vco,
	};
	struct device *dev = &pll_10nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

	DBG("DSI%d", pll_10nm->phy->id);

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
	pll_10nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_divider(dev, clk_name,
				     parent, CLK_SET_RATE_PARENT,
				     pll_10nm->phy->pll_base +
				     REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
				     0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	/* BIT CLK: DIV_CTRL_3_0 */
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     CLK_SET_RATE_PARENT,
				     pll_10nm->phy->base +
				     REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     0, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 2);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  0, 1, 4);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
	snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
	snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
	snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);

	hw = devm_clk_hw_register_mux(dev, clk_name,
				 ((const char *[]){
				 parent, parent2, parent3, parent4
				 }), 4, 0, pll_10nm->phy->base +
				 REG_DSI_10nm_PHY_CMN_CLK_CFG1,
				 0, 2, 0, NULL);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);

	/* PIX CLK DIV: DIV_CTRL_7_4 */
	hw = devm_clk_hw_register_divider(dev, clk_name, parent,
				     0, pll_10nm->phy->base +
					REG_DSI_10nm_PHY_CMN_CLK_CFG0,
				     4, 4, CLK_DIVIDER_ONE_BASED,
				     &pll_10nm->postdiv_lock);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto fail;
	}

	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	return 0;

fail:

	return ret;
}

static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_10nm *pll_10nm;
	int ret;

	pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
	if (!pll_10nm)
		return -ENOMEM;

	DBG("DSI PLL%d", phy->id);

	pll_10nm_list[phy->id] = pll_10nm;

	spin_lock_init(&pll_10nm->postdiv_lock);

	pll_10nm->phy = phy;

	ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_10nm->clk_hw;

	/* TODO: Remove this when we have proper display handover support */
	msm_dsi_phy_pll_save_state(phy);

	return 0;
}

static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data = 0;

	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
	mb(); /* make sure read happened */

	return (data & BIT(0));
}

static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
	void __iomem *lane_base = phy->lane_base;
	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */

	/*
	 * LPRX and CDRX need to be enabled only for the physical data lane
	 * corresponding to the logical data lane 0
	 */
	if (enable)
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
	else
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
}

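/*
 * Per-lane defaults: LP-TX/HS-TX strength, lane config and TX_DCTRL values
 * for the five lanes (likely four data lanes plus the clock lane). PHYs with
 * the v3.0.0 old-timings quirk use a different tx_dctrl[3] value and skip
 * the freeze-I/O release toggle at the end.
 */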
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
	int i;
	u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
	void __iomem *lane_base = phy->lane_base;

	if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
		tx_dctrl[3] = 0x02;

	/* Strength ctrl settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
			      0x55);
		/*
		 * Disable LPRX and CDRX for all lanes. Later on, they are
		 * re-enabled only for the physical data lane corresponding
		 * to the logical data lane 0
		 */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
			      0x88);
	}

	dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

	/* other settings */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
			      i == 4 ? 0x80 : 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
		dsi_phy_write(lane_base +
			      REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
			      tx_dctrl[i]);
	}

	if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
		/* Toggle BIT 0 to release freeze I/O */
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
		dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
	}
}

static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	int ret;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
					status, (status & BIT(0)),
					delay_us, timeout_us);
	if (ret) {
		pr_err("Ref gen not ready. Aborting\n");
		return -EINVAL;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap (TODO: we need to calculate this) */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

	/* DSI PHY timings */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
		      timing->hs_halfbyte_en);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
		      timing->clk_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
		      timing->clk_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
		      timing->clk_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
		      timing->hs_exit);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
		      timing->hs_zero);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
		      timing->hs_prepare);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
		      timing->hs_trail);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
		      timing->hs_rqst);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
		      timing->ta_go | (timing->ta_sure << 3));
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
		      timing->ta_get);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
		      0x00);

	/* Remove power down from all blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

	ret = dsi_10nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy);

	DBG("DSI%d PHY enabled", phy->id);

	return 0;
}

static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->base;
	u32 data;

	DBG("");

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		pr_warn("Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
	data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

	/* disable all lanes */
	data &= ~0x1F;
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();

	DBG("DSI%d PHY disabled", phy->id);
}

const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xae94400, 0xae96400 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		.regs = {
			{"vdds", 36000, 32},
		},
	},
	.ops = {
		.enable = dsi_10nm_phy_enable,
		.disable = dsi_10nm_phy_disable,
		.pll_init = dsi_pll_10nm_init,
		.save_pll_state = dsi_10nm_pll_save_state,
		.restore_pll_state = dsi_10nm_pll_restore_state,
	},
	.min_pll_rate = 1000000000UL,
	.max_pll_rate = 3500000000UL,
	.io_start = { 0xc994400, 0xc996400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};