/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_10nm.xml.h"

/*
 * DSI PLL 10nm - clock diagram (eg: DSI0):
 *
 *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
 *                              |                |
 *                              |                |
 *                 +---------+  |  +----------+  |  +----+
 *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
 *                 +---------+  |  +----------+  |  +----+
 *                              |                |
 *                              |                |          dsi0_pll_by_2_bit_clk
 *                              |                |              |
 *                              |                |  +----+      |  |\  dsi0_pclk_mux
 *                              |                |--| /2 |------o--|  \   |
 *                              |                |  +----+         |   \  |  +---------+
 *                              |                ------------------|    |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
 *                              |-----------------------------------|   /     +---------+
 *                              |          +-----+                  |  /
 *                              -----------| /4? |--o---------------|/
 *                                         +-----+  |               |
 *                                                   |               |dsiclk_sel
 *                                                   |
 *                                                   dsi0_pll_post_out_div_clk
 */

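/*
 * Illustrative numbers for the diagram above (not a fixed configuration):
 * with the VCO running at 1.5 GHz, out_div = 1 and a bit-clock divider
 * (divl_3_0) of 1, dsi0_pll_bit_clk is 1.5 GHz and dsi0_phy_pll_out_byteclk
 * is 1.5 GHz / 8 = 187.5 MHz. dsi0_phy_pll_out_dsiclk then depends on which
 * input dsiclk_sel picks and on the div_7_4 post divider.
 */
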
#define VCO_REF_CLK_RATE 19200000
#define FRAC_BITS 18

/* v3.0.0 10nm implementation that requires the old timing settings */
#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS  BIT(0)

struct dsi_pll_config {
        bool enable_ssc;
        bool ssc_center;
        u32 ssc_freq;
        u32 ssc_offset;
        u32 ssc_adj_per;

        /* out */
        u32 pll_prop_gain_rate;
        u32 decimal_div_start;
        u32 frac_div_start;
        u32 pll_clock_inverters;
        u32 ssc_stepsize;
        u32 ssc_div_per;
};

struct pll_10nm_cached_state {
        unsigned long vco_rate;
        u8 bit_clk_div;
        u8 pix_clk_div;
        u8 pll_out_div;
        u8 pll_mux;
};

struct dsi_pll_10nm {
        struct clk_hw clk_hw;

        struct msm_dsi_phy *phy;

        u64 vco_current_rate;

        /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
        spinlock_t postdiv_lock;

        struct pll_10nm_cached_state cached_state;

        struct dsi_pll_10nm *slave;
};

#define to_pll_10nm(x)  container_of(x, struct dsi_pll_10nm, clk_hw)

/**
 * struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
 * @rescode_offset_top: Offset for pull-up legs rescode.
 * @rescode_offset_bot: Offset for pull-down legs rescode.
 * @vreg_ctrl: vreg ctrl to drive LDO level
 */
struct dsi_phy_10nm_tuning_cfg {
        u8 rescode_offset_top[DSI_LANE_MAX];
        u8 rescode_offset_bot[DSI_LANE_MAX];
        u8 vreg_ctrl;
};

/*
 * Global list of private DSI PLL struct pointers. We need this for bonded DSI
 * mode, where the master PLL's clk_ops needs to access the slave's private data
 */
static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];

static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
        config->ssc_freq = 31500;
        config->ssc_offset = 5000;
        config->ssc_adj_per = 2;

        config->enable_ssc = false;
        config->ssc_center = false;
}

static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
        u64 fref = VCO_REF_CLK_RATE;
        u64 pll_freq;
        u64 divider;
        u64 dec, dec_multiple;
        u32 frac;
        u64 multiplier;

        pll_freq = pll->vco_current_rate;

        divider = fref * 2;

        multiplier = 1 << FRAC_BITS;
        dec_multiple = div_u64(pll_freq * multiplier, divider);
        dec = div_u64_rem(dec_multiple, multiplier, &frac);

        if (pll_freq <= 1900000000UL)
                config->pll_prop_gain_rate = 8;
        else if (pll_freq <= 3000000000UL)
                config->pll_prop_gain_rate = 10;
        else
                config->pll_prop_gain_rate = 12;
        if (pll_freq < 1100000000UL)
                config->pll_clock_inverters = 8;
        else
                config->pll_clock_inverters = 0;

        config->decimal_div_start = dec;
        config->frac_div_start = frac;
}

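/*
 * Worked example for dsi_pll_calc_dec_frac() above (illustrative only):
 * for a 1.5 GHz VCO with fref = 19.2 MHz, divider = 38.4 MHz and
 * dec_multiple = 1500000000 * 2^18 / 38400000 = 10240000, which splits into
 * dec = 39 (0x27) and frac = 16384 (0x4000). Since 1.5 GHz is below 1.9 GHz
 * but above 1.1 GHz, pll_prop_gain_rate = 8 and pll_clock_inverters = 0.
 */
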
#define SSC_CENTER      BIT(0)
#define SSC_EN          BIT(1)

static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
        u32 ssc_per;
        u32 ssc_mod;
        u64 ssc_step_size;
        u64 frac;

        if (!config->enable_ssc) {
                DBG("SSC not enabled\n");
                return;
        }

        ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
        ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
        ssc_per -= ssc_mod;

        frac = config->frac_div_start;
        ssc_step_size = config->decimal_div_start;
        ssc_step_size *= (1 << FRAC_BITS);
        ssc_step_size += frac;
        ssc_step_size *= config->ssc_offset;
        ssc_step_size *= (config->ssc_adj_per + 1);
        ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
        ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

        config->ssc_div_per = ssc_per;
        config->ssc_stepsize = ssc_step_size;

        pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
                 config->decimal_div_start, frac, FRAC_BITS);
        pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
                 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
}

static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
        void __iomem *base = pll->phy->pll_base;

        if (config->enable_ssc) {
                pr_debug("SSC is enabled\n");

                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
                              config->ssc_stepsize & 0xff);
                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
                              config->ssc_stepsize >> 8);
                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
                              config->ssc_div_per & 0xff);
                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
                              config->ssc_div_per >> 8);
                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
                              config->ssc_adj_per & 0xff);
                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
                              config->ssc_adj_per >> 8);
                dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
                              SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
        }
}

static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
        void __iomem *base = pll->phy->pll_base;

        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
                      0xba);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
                      0x0c);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
                      0x08);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
                      0xfa);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
                      0x4c);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
}

static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
        void __iomem *base = pll->phy->pll_base;

        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
                      config->decimal_div_start);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
                      config->frac_div_start & 0xff);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
                      (config->frac_div_start & 0xff00) >> 8);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
                      (config->frac_div_start & 0x30000) >> 16);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
        dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
                      config->pll_clock_inverters);
}

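/*
 * Note: the fractional divider word programmed by dsi_pll_commit() above is
 * FRAC_BITS (18) bits wide and is split across three registers:
 * FRAC_DIV_START_LOW_1 holds bits [7:0], FRAC_DIV_START_MID_1 bits [15:8]
 * and FRAC_DIV_START_HIGH_1 the top two bits [17:16].
 */
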
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
                                     unsigned long parent_rate)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
        struct dsi_pll_config config;

        DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
            parent_rate);

        pll_10nm->vco_current_rate = rate;

        dsi_pll_setup_config(&config);

        dsi_pll_calc_dec_frac(pll_10nm, &config);

        dsi_pll_calc_ssc(pll_10nm, &config);

        dsi_pll_commit(pll_10nm, &config);

        dsi_pll_config_hzindep_reg(pll_10nm);

        dsi_pll_ssc_commit(pll_10nm, &config);

        /* flush, ensure all register writes are done */
        wmb();

        return 0;
}

static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
{
        struct device *dev = &pll->phy->pdev->dev;
        int rc;
        u32 status = 0;
        u32 const delay_us = 100;
        u32 const timeout_us = 5000;

        rc = readl_poll_timeout_atomic(pll->phy->pll_base +
                                       REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
                                       status,
                                       ((status & BIT(0)) > 0),
                                       delay_us,
                                       timeout_us);
        if (rc)
                DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
                              pll->phy->id, status);

        return rc;
}

static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
        u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

        dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
        dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
                      data & ~BIT(5));
        ndelay(250);
}

static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
        u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);

        dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
                      data | BIT(5));
        dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
        ndelay(250);
}

static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
{
        u32 data;

        data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
        dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
                      data & ~BIT(5));
}

static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
{
        u32 data;

        data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
        dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
                      data | BIT(5));
}

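/*
 * As used throughout this driver, REG_DSI_10nm_PHY_CMN_CLK_CFG1 packs several
 * fields: bits [1:0] select the dsiclk source mux (saved/restored as pll_mux),
 * bit 2 selects the external PLL source for a slave PHY (see
 * dsi_10nm_set_usecase()) and bit 5 gates the global clock
 * (dsi_pll_enable_global_clk()/dsi_pll_disable_global_clk() above).
 */
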
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
        struct device *dev = &pll_10nm->phy->pdev->dev;
        int rc;

        dsi_pll_enable_pll_bias(pll_10nm);
        if (pll_10nm->slave)
                dsi_pll_enable_pll_bias(pll_10nm->slave);

        rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
        if (rc) {
                DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
                return rc;
        }

        /* Start PLL */
        dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
                      0x01);

        /*
         * ensure all PLL configurations are written prior to checking
         * for PLL lock.
         */
        wmb();

        /* Check for PLL lock */
        rc = dsi_pll_10nm_lock_status(pll_10nm);
        if (rc) {
                DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
                goto error;
        }

        pll_10nm->phy->pll_on = true;

        dsi_pll_enable_global_clk(pll_10nm);
        if (pll_10nm->slave)
                dsi_pll_enable_global_clk(pll_10nm->slave);

        dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
                      0x01);
        if (pll_10nm->slave)
                dsi_phy_write(pll_10nm->slave->phy->base +
                              REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);

error:
        return rc;
}

static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
        dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
        dsi_pll_disable_pll_bias(pll);
}

static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

        /*
         * To avoid any stray glitches while abruptly powering down the PLL
         * make sure to gate the clock using the clock enable bit before
         * powering down the PLL
         */
        dsi_pll_disable_global_clk(pll_10nm);
        dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
        dsi_pll_disable_sub(pll_10nm);
        if (pll_10nm->slave) {
                dsi_pll_disable_global_clk(pll_10nm->slave);
                dsi_pll_disable_sub(pll_10nm->slave);
        }
        /* flush, ensure all register writes are done */
        wmb();
        pll_10nm->phy->pll_on = false;
}

static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
        void __iomem *base = pll_10nm->phy->pll_base;
        u64 ref_clk = VCO_REF_CLK_RATE;
        u64 vco_rate = 0x0;
        u64 multiplier;
        u32 frac;
        u32 dec;
        u64 pll_freq, tmp64;

        dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
        dec &= 0xff;

        frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
        frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
                  0xff) << 8);
        frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
                  0x3) << 16);

        /*
         * TODO:
         *      1. Assumes prescaler is disabled
         */
        multiplier = 1 << FRAC_BITS;
        pll_freq = dec * (ref_clk * 2);
        tmp64 = (ref_clk * 2 * frac);
        pll_freq += div_u64(tmp64, multiplier);

        vco_rate = pll_freq;
        pll_10nm->vco_current_rate = vco_rate;

        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);

        return (unsigned long)vco_rate;
}

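/*
 * For example (illustrative only), reading back dec = 0x27 and frac = 0x4000
 * gives 39 * 38.4 MHz + (16384 / 2^18) * 38.4 MHz = 1497.6 MHz + 2.4 MHz,
 * i.e. the 1.5 GHz VCO rate from the dsi_pll_calc_dec_frac() example above.
 */
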
static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
                                        unsigned long rate, unsigned long *parent_rate)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);

        if (rate < pll_10nm->phy->cfg->min_pll_rate)
                return pll_10nm->phy->cfg->min_pll_rate;
        else if (rate > pll_10nm->phy->cfg->max_pll_rate)
                return pll_10nm->phy->cfg->max_pll_rate;
        else
                return rate;
}

static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
        .round_rate = dsi_pll_10nm_clk_round_rate,
        .set_rate = dsi_pll_10nm_vco_set_rate,
        .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
        .prepare = dsi_pll_10nm_vco_prepare,
        .unprepare = dsi_pll_10nm_vco_unprepare,
};

/*
 * PLL Callbacks
 */

static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
        struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
        void __iomem *phy_base = pll_10nm->phy->base;
        u32 cmn_clk_cfg0, cmn_clk_cfg1;

        cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
                                           REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
        cached->pll_out_div &= 0x3;

        cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
        cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
        cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;

        cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
        cached->pll_mux = cmn_clk_cfg1 & 0x3;

        DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
            pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
            cached->pix_clk_div, cached->pll_mux);
}

static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
        struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
        void __iomem *phy_base = pll_10nm->phy->base;
        u32 val;
        int ret;

        val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
        val &= ~0x3;
        val |= cached->pll_out_div;
        dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);

        dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
                      cached->bit_clk_div | (cached->pix_clk_div << 4));

        val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
        val &= ~0x3;
        val |= cached->pll_mux;
        dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);

        ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
                                        pll_10nm->vco_current_rate,
                                        VCO_REF_CLK_RATE);
        if (ret) {
                DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
                              "restore vco rate failed. ret=%d\n", ret);
                return ret;
        }

        DBG("DSI PLL%d", pll_10nm->phy->id);

        return 0;
}

static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
{
        struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
        void __iomem *base = phy->base;
        u32 data = 0x0; /* internal PLL */

        DBG("DSI PLL%d", pll_10nm->phy->id);

        switch (phy->usecase) {
        case MSM_DSI_PHY_STANDALONE:
                break;
        case MSM_DSI_PHY_MASTER:
                pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
                break;
        case MSM_DSI_PHY_SLAVE:
                data = 0x1; /* external PLL */
                break;
        default:
                return -EINVAL;
        }

        /* set PLL src */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));

        return 0;
}

/*
 * The post dividers and mux clocks are created using the standard divider and
 * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
 * state to follow the master PLL's divider/mux state. Therefore, we don't
 * require special clock ops that also configure the slave PLL registers
 */

static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
        char clk_name[32];
        struct clk_init_data vco_init = {
                .parent_data = &(const struct clk_parent_data) {
                        .fw_name = "ref",
                },
                .num_parents = 1,
                .name = clk_name,
                .flags = CLK_IGNORE_UNUSED,
                .ops = &clk_ops_dsi_pll_10nm_vco,
        };
        struct device *dev = &pll_10nm->phy->pdev->dev;
        struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
        struct clk_hw *pll_post_out_div, *pclk_mux;
        int ret;

        DBG("DSI%d", pll_10nm->phy->id);

        snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
        pll_10nm->clk_hw.init = &vco_init;

        ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
        if (ret)
                return ret;

        snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);

        pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
                        &pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
                        pll_10nm->phy->pll_base +
                                REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
                        0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
        if (IS_ERR(pll_out_div)) {
                ret = PTR_ERR(pll_out_div);
                goto fail;
        }

        snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);

        /* BIT CLK: DIV_CTRL_3_0 */
        pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
                        pll_out_div, CLK_SET_RATE_PARENT,
                        pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
                        0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
        if (IS_ERR(pll_bit)) {
                ret = PTR_ERR(pll_bit);
                goto fail;
        }

        snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);

        /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
        hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
                        pll_bit, CLK_SET_RATE_PARENT, 1, 8);
        if (IS_ERR(hw)) {
                ret = PTR_ERR(hw);
                goto fail;
        }

        provided_clocks[DSI_BYTE_PLL_CLK] = hw;

        snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);

        pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
                        clk_name, pll_bit, 0, 1, 2);
        if (IS_ERR(pll_by_2_bit)) {
                ret = PTR_ERR(pll_by_2_bit);
                goto fail;
        }

        snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);

        pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
                        clk_name, pll_out_div, 0, 1, 4);
        if (IS_ERR(pll_post_out_div)) {
                ret = PTR_ERR(pll_post_out_div);
                goto fail;
        }

        snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);

        pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
                        ((const struct clk_hw *[]){
                                pll_bit,
                                pll_by_2_bit,
                                pll_out_div,
                                pll_post_out_div,
                        }), 4, 0, pll_10nm->phy->base +
                                REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
        if (IS_ERR(pclk_mux)) {
                ret = PTR_ERR(pclk_mux);
                goto fail;
        }

        snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);

        /* PIX CLK DIV : DIV_CTRL_7_4 */
        hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
                        0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
                        4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
        if (IS_ERR(hw)) {
                ret = PTR_ERR(hw);
                goto fail;
        }

        provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

        return 0;

fail:

        return ret;
}

static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
{
        struct platform_device *pdev = phy->pdev;
        struct dsi_pll_10nm *pll_10nm;
        int ret;

        pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
        if (!pll_10nm)
                return -ENOMEM;

        DBG("DSI PLL%d", phy->id);

        pll_10nm_list[phy->id] = pll_10nm;

        spin_lock_init(&pll_10nm->postdiv_lock);

        pll_10nm->phy = phy;

        ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
        if (ret) {
                DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ret;
        }

        phy->vco_hw = &pll_10nm->clk_hw;

        /* TODO: Remove this when we have proper display handover support */
        msm_dsi_phy_pll_save_state(phy);

        return 0;
}

static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
{
        void __iomem *base = phy->base;
        u32 data = 0;

        data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
        mb(); /* make sure read happened */

        return (data & BIT(0));
}

static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
        void __iomem *lane_base = phy->lane_base;
        int phy_lane_0 = 0;     /* TODO: Support all lane swap configs */

        /*
         * LPRX and CDRX need to be enabled only for the physical data lane
         * corresponding to the logical data lane 0
         */
        if (enable)
                dsi_phy_write(lane_base +
                              REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
        else
                dsi_phy_write(lane_base +
                              REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
}

static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
{
        int i;
        u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
        void __iomem *lane_base = phy->lane_base;
        struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;

        if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
                tx_dctrl[3] = 0x02;

        /* Strength ctrl settings */
        for (i = 0; i < 5; i++) {
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
                              0x55);
                /*
                 * Disable LPRX and CDRX for all lanes. They will later be
                 * enabled only for the physical data lane corresponding
                 * to the logical data lane 0
                 */
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
                              0x88);
        }

        dsi_phy_hw_v3_0_config_lpcdrx(phy, true);

        /* other settings */
        for (i = 0; i < 5; i++) {
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
                              i == 4 ? 0x80 : 0x0);

                /* platform specific dsi phy drive strength adjustment */
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
                              tuning_cfg->rescode_offset_top[i]);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
                              tuning_cfg->rescode_offset_bot[i]);

                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
                              tx_dctrl[i]);
        }

        if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
                /* Toggle BIT 0 to release freeze I/O */
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
        }
}

static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
                               struct msm_dsi_phy_clk_request *clk_req)
{
        int ret;
        u32 status;
        u32 const delay_us = 5;
        u32 const timeout_us = 1000;
        struct msm_dsi_dphy_timing *timing = &phy->timing;
        void __iomem *base = phy->base;
        struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
        u32 data;

        DBG("");

        if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
                DRM_DEV_ERROR(&phy->pdev->dev,
                              "%s: D-PHY timing calculation failed\n", __func__);
                return -EINVAL;
        }

        if (dsi_phy_hw_v3_0_is_pll_on(phy))
                pr_warn("PLL turned on before configuring PHY\n");

        /* wait for REFGEN READY */
        ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
                                        status, (status & BIT(0)),
                                        delay_us, timeout_us);
        if (ret) {
                pr_err("Ref gen not ready. Aborting\n");
                return -EINVAL;
        }

        /* de-assert digital and pll power down */
        data = BIT(6) | BIT(5);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);

        /* Assert PLL core reset */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);

        /* turn off resync FIFO */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);

        /* Select MS1 byte-clk */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);

        /* Enable LDO with platform specific drive level/amplitude adjustment */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
                      tuning_cfg->vreg_ctrl);

        /* Configure PHY lane swap (TODO: we need to calculate this) */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);

        /* DSI PHY timings */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
                      timing->hs_halfbyte_en);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
                      timing->clk_zero);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
                      timing->clk_prepare);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
                      timing->clk_trail);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
                      timing->hs_exit);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
                      timing->hs_zero);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
                      timing->hs_prepare);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
                      timing->hs_trail);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
                      timing->hs_rqst);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
                      timing->ta_go | (timing->ta_sure << 3));
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
                      timing->ta_get);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
                      0x00);

        /* Remove power down from all blocks */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);

        /* power up lanes */
        data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

        /* TODO: only power up lanes that are used */
        data |= 0x1F;
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);

        /* Select full-rate mode */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);

        ret = dsi_10nm_set_usecase(phy);
        if (ret) {
                DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
                              __func__, ret);
                return ret;
        }

        /* DSI lane settings */
        dsi_phy_hw_v3_0_lane_settings(phy);

        DBG("DSI%d PHY enabled", phy->id);

        return 0;
}

static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
{
        void __iomem *base = phy->base;
        u32 data;

        DBG("");

        if (dsi_phy_hw_v3_0_is_pll_on(phy))
                pr_warn("Turning OFF PHY while PLL is on\n");

        dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
        data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);

        /* disable all lanes */
        data &= ~0x1F;
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);

        /* Turn off all PHY blocks */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
        /* make sure phy is turned off */
        wmb();

        DBG("DSI%d PHY disabled", phy->id);
}

static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
{
        struct device *dev = &phy->pdev->dev;
        struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
        s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
        s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
        u32 ldo_level = 400; /* 400mV */
        u8 level;
        int ret, i;

        tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
        if (!tuning_cfg)
                return -ENOMEM;

        /* Drive strength adjustment parameters */
        ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
                                        offset_top, DSI_LANE_MAX);
        if (ret && ret != -EINVAL) {
                DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
                return ret;
        }

        for (i = 0; i < DSI_LANE_MAX; i++) {
                if (offset_top[i] < -32 || offset_top[i] > 31) {
                        DRM_DEV_ERROR(dev,
                                      "qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
                                      offset_top[i]);
                        return -EINVAL;
                }
                tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
        }

        ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
                                        offset_bot, DSI_LANE_MAX);
        if (ret && ret != -EINVAL) {
                DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
                return ret;
        }

        for (i = 0; i < DSI_LANE_MAX; i++) {
                if (offset_bot[i] < -32 || offset_bot[i] > 31) {
                        DRM_DEV_ERROR(dev,
                                      "qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
                                      offset_bot[i]);
                        return -EINVAL;
                }
                tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
        }

        /* Drive level/amplitude adjustment parameters */
        ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
        if (ret && ret != -EINVAL) {
                DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
                return ret;
        }

        switch (ldo_level) {
        case 375:
                level = 0;
                break;
        case 400:
                level = 1;
                break;
        case 425:
                level = 2;
                break;
        case 450:
                level = 3;
                break;
        case 475:
                level = 4;
                break;
        case 500:
                level = 5;
                break;
        default:
                DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
                return -EINVAL;
        }
        tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);

        phy->tuning_cfg = tuning_cfg;

        return 0;
}

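/*
 * Example (illustrative only; the &dsi0_phy label is hypothetical) of how the
 * optional tuning properties parsed above could look in a board DTS:
 *
 *      &dsi0_phy {
 *              qcom,phy-rescode-offset-top = /bits/ 8 <0 0 0 0 0>;
 *              qcom,phy-rescode-offset-bot = /bits/ 8 <0 0 0 0 0>;
 *              qcom,phy-drive-ldo-level = <450>;
 *      };
 *
 * A 450 mV LDO level maps to level 3, so vreg_ctrl = 0x58 | 3 = 0x5b.
 */
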
static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
        { .supply = "vdds", .init_load_uA = 36000 },
};

const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
        .has_phy_lane = true,
        .regulator_data = dsi_phy_10nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
        .ops = {
                .enable = dsi_10nm_phy_enable,
                .disable = dsi_10nm_phy_disable,
                .pll_init = dsi_pll_10nm_init,
                .save_pll_state = dsi_10nm_pll_save_state,
                .restore_pll_state = dsi_10nm_pll_restore_state,
                .parse_dt_properties = dsi_10nm_phy_parse_dt,
        },
        .min_pll_rate = 1000000000UL,
        .max_pll_rate = 3500000000UL,
        .io_start = { 0xae94400, 0xae96400 },
        .num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
        .has_phy_lane = true,
        .regulator_data = dsi_phy_10nm_regulators,
        .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
        .ops = {
                .enable = dsi_10nm_phy_enable,
                .disable = dsi_10nm_phy_disable,
                .pll_init = dsi_pll_10nm_init,
                .save_pll_state = dsi_10nm_pll_save_state,
                .restore_pll_state = dsi_10nm_pll_restore_state,
                .parse_dt_properties = dsi_10nm_phy_parse_dt,
        },
        .min_pll_rate = 1000000000UL,
        .max_pll_rate = 3500000000UL,
        .io_start = { 0xc994400, 0xc996400 },
        .num_dsi_phy = 2,
        .quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
};