// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "phy-qcom-qmp.h"

/* QPHY_SW_RESET bit */
#define SW_RESET				BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN				BIT(0)
#define REFCLK_DRV_DSBL				BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START				BIT(0)
#define PCS_START				BIT(1)
#define PLL_READY_GATE_EN			BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS				BIT(6)
/* QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY				BIT(0)

#define PHY_INIT_COMPLETE_TIMEOUT		10000
#define POWER_DOWN_DELAY_US_MIN			10
#define POWER_DOWN_DELAY_US_MAX			20

struct qmp_phy_init_tbl {
	unsigned int offset;
	unsigned int val;
	/*
	 * mask of lanes for which this register is written
	 * for cases when second lane needs different values
	 */
	u8 lane_mask;
};

#define QMP_PHY_INIT_CFG(o, v)		\
	{				\
		.offset = o,		\
		.val = v,		\
		.lane_mask = 0xff,	\
	}

#define QMP_PHY_INIT_CFG_LANE(o, v, l)	\
	{				\
		.offset = o,		\
		.val = v,		\
		.lane_mask = l,		\
	}

/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
	/* Common block control registers */
	QPHY_COM_SW_RESET,
	QPHY_COM_POWER_DOWN_CONTROL,
	QPHY_COM_START_CONTROL,
	QPHY_COM_PCS_READY_STATUS,
	/* PCS registers */
	QPHY_SW_RESET,
	QPHY_START_CTRL,
	QPHY_PCS_STATUS,
	/* Keep last to ensure regs_layout arrays are properly initialized */
	QPHY_LAYOUT_SIZE
};

static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
	[QPHY_COM_SW_RESET] = 0x400,
	[QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
	[QPHY_COM_START_CONTROL] = 0x408,
	[QPHY_COM_PCS_READY_STATUS] = 0x448,
	[QPHY_SW_RESET] = QPHY_V2_PCS_SW_RESET,
	[QPHY_START_CTRL] = QPHY_V2_PCS_START_CONTROL,
	[QPHY_PCS_STATUS] = QPHY_V2_PCS_PCI_PCS_STATUS,
};
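/*
 * One-time init sequences for the common serdes block and the per-lane
 * tx/rx/pcs blocks. The values are opaque hardware settings; entries are
 * written in table order by the configure helpers further down.
 */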
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
	QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
	QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
	QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
	QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
	QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
	QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
	QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
};

static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
	QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
	QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
};

static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
	QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
};

static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL, 0x4c),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),

	QMP_PHY_INIT_CFG(QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME, 0x05),

	QMP_PHY_INIT_CFG(QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE, 0x05),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_DOWN_CONTROL, 0x02),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG4, 0x00),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG1, 0xa3),
	QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
	/* number of PHYs provided by this block */
	int num_phys;

	/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
	const struct qmp_phy_init_tbl *serdes_tbl;
	int serdes_tbl_num;
	const struct qmp_phy_init_tbl *tx_tbl;
	int tx_tbl_num;
	const struct qmp_phy_init_tbl *rx_tbl;
	int rx_tbl_num;
	const struct qmp_phy_init_tbl *pcs_tbl;
	int pcs_tbl_num;

	/* clock ids to be requested */
	const char * const *clk_list;
	int num_clks;
	/* resets to be requested */
	const char * const *reset_list;
	int num_resets;
	/* regulators to be requested */
	const char * const *vreg_list;
	int num_vregs;

	/* array of registers with different offsets */
	const unsigned int *regs;
};

/**
 * struct qmp_phy - per-lane phy descriptor
 *
 * @phy: generic phy
 * @cfg: phy specific configuration
 * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
 * @tx: iomapped memory space for lane's tx
 * @rx: iomapped memory space for lane's rx
 * @pcs: iomapped memory space for lane's pcs
 * @pipe_clk: pipe clock
 * @index: lane index
 * @qmp: QMP phy to which this lane belongs
 * @lane_rst: lane's reset controller
 */
struct qmp_phy {
	struct phy *phy;
	const struct qmp_phy_cfg *cfg;
	void __iomem *serdes;
	void __iomem *tx;
	void __iomem *rx;
	void __iomem *pcs;
	struct clk *pipe_clk;
	unsigned int index;
	struct qcom_qmp *qmp;
	struct reset_control *lane_rst;
};

/**
 * struct qcom_qmp - structure holding QMP phy block attributes
 *
 * @dev: device
 *
 * @clks: array of clocks required by phy
 * @resets: array of resets required by phy
 * @vregs: regulator supplies bulk data
 *
 * @phys: array of per-lane phy descriptors
 * @phy_mutex: mutex lock for PHY common block initialization
 * @init_count: phy common block initialization count
 */
struct qcom_qmp {
	struct device *dev;

	struct clk_bulk_data *clks;
	struct reset_control_bulk_data *resets;
	struct regulator_bulk_data *vregs;

	struct qmp_phy **phys;

	struct mutex phy_mutex;
	int init_count;
};

static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
	u32 reg;

	reg = readl(base + offset);
	reg |= val;
	writel(reg, base + offset);

	/* ensure that above write is through */
	readl(base + offset);
}

static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
{
	u32 reg;

	reg = readl(base + offset);
	reg &= ~val;
	writel(reg, base + offset);

	/* ensure that above write is through */
	readl(base + offset);
}

/* list of clocks required by phy */
static const char * const msm8996_phy_clk_l[] = {
	"aux", "cfg_ahb", "ref",
};

/* list of resets */
static const char * const msm8996_pciephy_reset_l[] = {
	"phy", "common", "cfg",
};

/* list of regulators */
static const char * const qmp_phy_vreg_l[] = {
	"vdda-phy", "vdda-pll",
};

static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
	.num_phys = 3,

	.serdes_tbl = msm8996_pcie_serdes_tbl,
	.serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
	.tx_tbl = msm8996_pcie_tx_tbl,
	.tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
	.rx_tbl = msm8996_pcie_rx_tbl,
	.rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
	.pcs_tbl = msm8996_pcie_pcs_tbl,
	.pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
	.clk_list = msm8996_phy_clk_l,
	.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
	.reset_list = msm8996_pciephy_reset_l,
	.num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
	.vreg_list = qmp_phy_vreg_l,
	.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
	.regs = pciephy_regs_layout,
};
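/*
 * Helpers that program an init table into a register block. The _lane
 * variant skips entries whose lane_mask does not include the requested
 * lane; the plain variant writes every entry (QMP_PHY_INIT_CFG uses a
 * lane_mask of 0xff, which matches all lanes).
 */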
static void qmp_pcie_msm8996_configure_lane(void __iomem *base,
					    const struct qmp_phy_init_tbl tbl[],
					    int num,
					    u8 lane_mask)
{
	int i;
	const struct qmp_phy_init_tbl *t = tbl;

	if (!t)
		return;

	for (i = 0; i < num; i++, t++) {
		if (!(t->lane_mask & lane_mask))
			continue;

		writel(t->val, base + t->offset);
	}
}

static void qmp_pcie_msm8996_configure(void __iomem *base,
				       const struct qmp_phy_init_tbl tbl[],
				       int num)
{
	qmp_pcie_msm8996_configure_lane(base, tbl, num, 0xff);
}

static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
{
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *serdes = qphy->serdes;
	const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
	int serdes_tbl_num = cfg->serdes_tbl_num;
	void __iomem *status;
	unsigned int val;
	int ret;

	qmp_pcie_msm8996_configure(serdes, serdes_tbl, serdes_tbl_num);

	qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
	qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
		     SERDES_START | PCS_START);

	status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
	ret = readl_poll_timeout(status, val, (val & PCS_READY), 200,
				 PHY_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		dev_err(qmp->dev,
			"phy common block init timed-out\n");
		return ret;
	}

	return 0;
}

static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
{
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *serdes = qphy->serdes;
	int ret;

	mutex_lock(&qmp->phy_mutex);
	if (qmp->init_count++) {
		mutex_unlock(&qmp->phy_mutex);
		return 0;
	}

	ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
	if (ret) {
		dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
		goto err_decrement_count;
	}

	ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
	if (ret) {
		dev_err(qmp->dev, "reset assert failed\n");
		goto err_disable_regulators;
	}

	ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
	if (ret) {
		dev_err(qmp->dev, "reset deassert failed\n");
		goto err_disable_regulators;
	}

	ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
	if (ret)
		goto err_assert_reset;

	qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
		     SW_PWRDN);

	mutex_unlock(&qmp->phy_mutex);

	return 0;

err_assert_reset:
	reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
err_decrement_count:
	qmp->init_count--;
	mutex_unlock(&qmp->phy_mutex);

	return ret;
}

static int qmp_pcie_msm8996_com_exit(struct qmp_phy *qphy)
{
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *serdes = qphy->serdes;

	mutex_lock(&qmp->phy_mutex);
	if (--qmp->init_count) {
		mutex_unlock(&qmp->phy_mutex);
		return 0;
	}

	qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
		     SERDES_START | PCS_START);
	qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
		     SW_RESET);
	qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
		     SW_PWRDN);

	reset_control_bulk_assert(cfg->num_resets, qmp->resets);

	clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);

	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);

	mutex_unlock(&qmp->phy_mutex);

	return 0;
}

static int qmp_pcie_msm8996_init(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);
	struct qcom_qmp *qmp = qphy->qmp;
	int ret;

	dev_vdbg(qmp->dev, "Initializing QMP phy\n");

	ret = qmp_pcie_msm8996_com_init(qphy);
	if (ret)
		return ret;

	return 0;
}

static int qmp_pcie_msm8996_power_on(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);
	struct qcom_qmp *qmp = qphy->qmp;
	const struct qmp_phy_cfg *cfg = qphy->cfg;
	void __iomem *tx = qphy->tx;
	void __iomem *rx = qphy->rx;
	void __iomem *pcs = qphy->pcs;
	void __iomem *status;
	unsigned int val;
	int ret;

	qmp_pcie_msm8996_serdes_init(qphy);

	ret = reset_control_deassert(qphy->lane_rst);
	if (ret) {
		dev_err(qmp->dev, "lane%d reset deassert failed\n",
			qphy->index);
		return ret;
	}

	ret = clk_prepare_enable(qphy->pipe_clk);
	if (ret) {
		dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
		goto err_reset_lane;
	}

	/* Tx, Rx, and PCS configurations */
	qmp_pcie_msm8996_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
	qmp_pcie_msm8996_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
	qmp_pcie_msm8996_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);

	/*
	 * Pull the PHY out of the POWER DOWN state. The power-down control
	 * is an active-low enable, so setting these bits powers the PHY up.
	 */
	qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
		     SW_PWRDN | REFCLK_DRV_DSBL);

	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);

	/* Pull PHY out of reset state */
	qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);

	/* start SerDes and Phy-Coding-Sublayer */
	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL],
		     PCS_START | PLL_READY_GATE_EN);

	status = pcs + cfg->regs[QPHY_PCS_STATUS];
	ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200,
				 PHY_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		dev_err(qmp->dev, "phy initialization timed-out\n");
		goto err_disable_pipe_clk;
	}

	return 0;

err_disable_pipe_clk:
	clk_disable_unprepare(qphy->pipe_clk);
err_reset_lane:
	reset_control_assert(qphy->lane_rst);

	return ret;
}

static int qmp_pcie_msm8996_power_off(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);
	const struct qmp_phy_cfg *cfg = qphy->cfg;

	clk_disable_unprepare(qphy->pipe_clk);

	/* PHY reset */
	qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);

	/* stop SerDes and Phy-Coding-Sublayer */
	qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL],
		     SERDES_START | PCS_START);

	/* Put PHY into POWER DOWN state: active low */
	qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
		     SW_PWRDN | REFCLK_DRV_DSBL);

	return 0;
}

static int qmp_pcie_msm8996_exit(struct phy *phy)
{
	struct qmp_phy *qphy = phy_get_drvdata(phy);

	reset_control_assert(qphy->lane_rst);

	qmp_pcie_msm8996_com_exit(qphy);

	return 0;
}

static int qmp_pcie_msm8996_enable(struct phy *phy)
{
	int ret;

	ret = qmp_pcie_msm8996_init(phy);
	if (ret)
		return ret;

	ret = qmp_pcie_msm8996_power_on(phy);
	if (ret)
		qmp_pcie_msm8996_exit(phy);

	return ret;
}

static int qmp_pcie_msm8996_disable(struct phy *phy)
{
	int ret;

	ret = qmp_pcie_msm8996_power_off(phy);
	if (ret)
		return ret;

	return qmp_pcie_msm8996_exit(phy);
}
static int qmp_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	int num = cfg->num_vregs;
	int i;

	qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
	if (!qmp->vregs)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		qmp->vregs[i].supply = cfg->vreg_list[i];

	return devm_regulator_bulk_get(dev, num, qmp->vregs);
}

static int qmp_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	int i;
	int ret;

	qmp->resets = devm_kcalloc(dev, cfg->num_resets,
				   sizeof(*qmp->resets), GFP_KERNEL);
	if (!qmp->resets)
		return -ENOMEM;

	for (i = 0; i < cfg->num_resets; i++)
		qmp->resets[i].id = cfg->reset_list[i];

	ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get resets\n");

	return 0;
}

static int qmp_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	int num = cfg->num_clks;
	int i;

	qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
	if (!qmp->clks)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		qmp->clks[i].id = cfg->clk_list[i];

	return devm_clk_bulk_get(dev, num, qmp->clks);
}

static void phy_clk_release_provider(void *res)
{
	of_clk_del_provider(res);
}

/*
 * Register a fixed rate pipe clock.
 *
 * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which
 * gate-controls it. The <s>_pipe_clk coming out of the GCC is requested
 * by the PHY driver for its operations.
 * We register the <s>_pipe_clksrc here. The gcc driver takes care
 * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
 * The picture below shows this relationship.
 *
 *         +---------------+
 *         |   PHY block   |<<---------------------------------------+
 *         |               |                                         |
 *         |   +-------+   |                   +-----+               |
 *   I/P---^-->|  PLL  |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
 *    clk  |   +-------+   |                   +-----+
 *         +---------------+
 */
static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
{
	struct clk_fixed_rate *fixed;
	struct clk_init_data init = { };
	int ret;

	ret = of_property_read_string(np, "clock-output-names", &init.name);
	if (ret) {
		dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
		return ret;
	}

	fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
	if (!fixed)
		return -ENOMEM;

	init.ops = &clk_fixed_rate_ops;

	/* controllers using QMP phys use 125MHz pipe clock interface */
	fixed->fixed_rate = 125000000;
	fixed->hw.init = &init;

	ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
	if (ret)
		return ret;

	ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
	if (ret)
		return ret;

	/*
	 * Roll a devm action because the clock provider is the child node, but
	 * the child node is not actually a device.
	 */
	return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
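/*
 * Illustrative child-node fragment (names and values are placeholders, not
 * copied from a real dts; see the DT binding for the actual properties).
 * The "clock-output-names" string is what phy_pipe_clk_register() above
 * uses as the fixed-rate clock's name, and the child node then acts as a
 * #clock-cells = <0> provider for GCC to consume:
 *
 *	pciephy_0: phy@... {
 *		reg = <...>, <...>, <...>;	(tx, rx, pcs)
 *		clock-output-names = "pcie_0_pipe_clk_src";
 *		#clock-cells = <0>;
 *		...
 *	};
 */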
static const struct phy_ops qmp_pcie_msm8996_ops = {
	.power_on = qmp_pcie_msm8996_enable,
	.power_off = qmp_pcie_msm8996_disable,
	.owner = THIS_MODULE,
};

static void qcom_qmp_reset_control_put(void *data)
{
	reset_control_put(data);
}

static int qmp_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
				   void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
	struct qcom_qmp *qmp = dev_get_drvdata(dev);
	struct phy *generic_phy;
	struct qmp_phy *qphy;
	int ret;

	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
	if (!qphy)
		return -ENOMEM;

	qphy->cfg = cfg;
	qphy->serdes = serdes;
	/*
	 * Get memory resources for each PHY:
	 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
	 */
	qphy->tx = devm_of_iomap(dev, np, 0, NULL);
	if (IS_ERR(qphy->tx))
		return PTR_ERR(qphy->tx);

	qphy->rx = devm_of_iomap(dev, np, 1, NULL);
	if (IS_ERR(qphy->rx))
		return PTR_ERR(qphy->rx);

	qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
	if (IS_ERR(qphy->pcs))
		return PTR_ERR(qphy->pcs);

	qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
	if (IS_ERR(qphy->pipe_clk)) {
		return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
				     "failed to get lane%d pipe clock\n", id);
	}

	qphy->lane_rst = of_reset_control_get_exclusive_by_index(np, 0);
	if (IS_ERR(qphy->lane_rst)) {
		dev_err(dev, "failed to get lane%d reset\n", id);
		return PTR_ERR(qphy->lane_rst);
	}
	ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
				       qphy->lane_rst);
	if (ret)
		return ret;

	generic_phy = devm_phy_create(dev, np, &qmp_pcie_msm8996_ops);
	if (IS_ERR(generic_phy)) {
		ret = PTR_ERR(generic_phy);
		dev_err(dev, "failed to create qphy %d\n", ret);
		return ret;
	}

	qphy->phy = generic_phy;
	qphy->index = id;
	qphy->qmp = qmp;
	qmp->phys[id] = qphy;
	phy_set_drvdata(generic_phy, qphy);

	return 0;
}

static const struct of_device_id qmp_pcie_msm8996_of_match_table[] = {
	{
		.compatible = "qcom,msm8996-qmp-pcie-phy",
		.data = &msm8996_pciephy_cfg,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, qmp_pcie_msm8996_of_match_table);
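/*
 * Probe maps the shared serdes register region, builds the clock, reset and
 * regulator bulk data named in the match data, and then registers one phy
 * plus one fixed-rate pipe clock per available child node.
 */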
static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
{
	struct qcom_qmp *qmp;
	struct device *dev = &pdev->dev;
	struct device_node *child;
	struct phy_provider *phy_provider;
	void __iomem *serdes;
	const struct qmp_phy_cfg *cfg = NULL;
	int num, id, expected_phys;
	int ret;

	qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
	if (!qmp)
		return -ENOMEM;

	qmp->dev = dev;
	dev_set_drvdata(dev, qmp);

	cfg = of_device_get_match_data(dev);
	if (!cfg)
		return -EINVAL;

	serdes = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	expected_phys = cfg->num_phys;

	mutex_init(&qmp->phy_mutex);

	ret = qmp_pcie_msm8996_clk_init(dev, cfg);
	if (ret)
		return ret;

	ret = qmp_pcie_msm8996_reset_init(dev, cfg);
	if (ret)
		return ret;

	ret = qmp_pcie_msm8996_vreg_init(dev, cfg);
	if (ret)
		return ret;

	num = of_get_available_child_count(dev->of_node);
	/* do we have a rogue child node? */
	if (num > expected_phys)
		return -EINVAL;

	qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
	if (!qmp->phys)
		return -ENOMEM;

	id = 0;
	for_each_available_child_of_node(dev->of_node, child) {
		/* Create per-lane phy */
		ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg);
		if (ret) {
			dev_err(dev, "failed to create lane%d phy, %d\n",
				id, ret);
			goto err_node_put;
		}

		/*
		 * Register the pipe clock provided by phy.
		 * See function description to see details of this pipe clock.
		 */
		ret = phy_pipe_clk_register(qmp, child);
		if (ret) {
			dev_err(qmp->dev,
				"failed to register pipe clock source\n");
			goto err_node_put;
		}

		id++;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);

	return PTR_ERR_OR_ZERO(phy_provider);

err_node_put:
	of_node_put(child);
	return ret;
}

static struct platform_driver qmp_pcie_msm8996_driver = {
	.probe = qmp_pcie_msm8996_probe,
	.driver = {
		.name = "qcom-qmp-msm8996-pcie-phy",
		.of_match_table = qmp_pcie_msm8996_of_match_table,
	},
};

module_platform_driver(qmp_pcie_msm8996_driver);

MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP MSM8996 PCIe PHY driver");
MODULE_LICENSE("GPL v2");