// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000
#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5

#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
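/*
 * Note for orientation (a summary of how the register blocks above are
 * used, not an exhaustive map): the driver programs three separate
 * address spaces. PARF (PCIE20_PARF_*) is Qualcomm's wrapper around the
 * DesignWare core, ELBI (PCIE20_ELBI_*) carries sideband controls such
 * as the link-training enable used on older IP revisions, and the
 * remaining offsets (e.g. PCIE20_AXI_MSTR_RESP_COMP_CTRL0) live in the
 * DBI space mapped by the DesignWare host core.
 */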
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[9];
	int num_clks;
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
	struct clk *pipe_clk_src;
	struct clk *phy_pipe_clk;
	struct clk *ref_clk_src;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	unsigned int pipe_clk_need_muxing:1;
	unsigned int has_tbu_clk:1;
	unsigned int has_ddrss_sf_tbu_clk:1;
	unsigned int has_aggre0_clk:1;
	unsigned int has_aggre1_clk:1;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_cfg *cfg;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
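/*
 * Background on the delays above (an assumption based on the PCIe CEM
 * spec, not stated in this file): PERST# must remain asserted for at
 * least 100 ms after power and the reference clock are stable
 * (Tpvperl), which is what the msleep(100) in qcom_ep_reset_deassert()
 * provides before the endpoint is released from reset.
 */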
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
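/*
 * A note on the pattern above (and in the other init/deinit pairs in
 * this file): qcom_pcie_init_2_1_0() deasserts resets in dependency
 * order (AHB first, AXI last), and its error labels unwind in exactly
 * the reverse order, so a failure at any step re-asserts only the
 * resets that were already deasserted.
 */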
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
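/*
 * Where the LTSSM is kicked differs per IP revision: 2.1.0 (and 1.0.0)
 * set LT_ENABLE in the ELBI space via qcom_pcie_2_1_0_ltssm_enable()
 * above, while 2.3.2 and later expose an LTSSM enable bit (BIT(8)) in
 * the PARF space, used by the helper below and shared by all of the
 * newer ops tables at the end of this file.
 */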
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
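/*
 * The pipe clock is deliberately not enabled in qcom_pcie_init_2_3_2()
 * above: it is sourced from the PHY, so it can only be turned on in the
 * post_init hook below, which qcom_pcie_host_init() invokes after
 * phy_power_on(). qcom_pcie_post_deinit_2_3_2() mirrors this on
 * teardown.
 */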
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019.
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
	       | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure; we will return the
	 * original failure in 'ret' anyway.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}
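/*
 * On parts with pipe_clk_need_muxing set (e.g. sc7280, sm8450),
 * pipe_clk_src is a mux: qcom_pcie_init_2_7_0() parks it on the TCXO
 * reference so the core can come up before the PHY is running, and
 * qcom_pcie_post_init_2_7_0() switches it over to the PHY-generated
 * pipe clock once phy_power_on() has completed. The clock names looked
 * up below ("pipe_mux", "phy_pipe", "ref") are the DT identifiers this
 * driver expects for those three clocks.
 */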
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int idx;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";
	if (pcie->cfg->has_tbu_clk)
		res->clks[idx++].id = "tbu";
	if (pcie->cfg->has_ddrss_sf_tbu_clk)
		res->clks[idx++].id = "ddrss_sf_tbu";
	if (pcie->cfg->has_aggre0_clk)
		res->clks[idx++].id = "aggre0";
	if (pcie->cfg->has_aggre1_clk)
		res->clks[idx++].id = "aggre1";

	res->num_clks = idx;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	if (pcie->cfg->pipe_clk_need_muxing) {
		res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
		if (IS_ERR(res->pipe_clk_src))
			return PTR_ERR(res->pipe_clk_src);

		res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe");
		if (IS_ERR(res->phy_pipe_clk))
			return PTR_ERR(res->phy_pipe_clk);

		res->ref_clk_src = devm_clk_get(dev, "ref");
		if (IS_ERR(res->ref_clk_src))
			return PTR_ERR(res->ref_clk_src);
	}

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	/* Set TCXO as clock source for pcie_pipe_clk_src */
	if (pcie->cfg->pipe_clk_need_muxing)
		clk_set_parent(res->pipe_clk_src, res->ref_clk_src);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	/* Set pipe clock as clock source for pcie_pipe_clk_src */
	if (pcie->cfg->pipe_clk_need_muxing)
		clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
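/*
 * A rough worked example of the table programmed below (illustrative
 * values, not taken from any real DT): the PARF BDF-to-SID table is an
 * array of u32 slots indexed by crc8(bdf). For an iommu-map entry
 * translating BDF 0x0100 (bus 1, devfn 0) to SMMU SID base + 1, the
 * slot written is:
 *
 *	hash = crc8(qcom_pcie_crc8_table, cpu_to_be16(0x0100), 0);
 *	val  = 0x0100 << 16 | 1 << 8 | 0;	// BDF | SID offset | NEXT
 *
 * On a hash collision, the occupied slot's NEXT byte is pointed at the
 * following free slot, forming a chain that the hardware walks.
 */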
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err;
	}

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->cfg->ops->post_deinit)
		pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};
/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

static const struct qcom_pcie_cfg apq8084_cfg = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg ipq8064_cfg = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg msm8996_cfg = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg ipq8074_cfg = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg ipq4019_cfg = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg sdm845_cfg = {
	.ops = &ops_2_7_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg sm8150_cfg = {
	/*
	 * sm8150 has qcom IP rev 1.5.0. However, the 1.5.0 ops are the
	 * same as those for 1.9.0, so reuse them.
	 */
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg sm8250_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
	.has_ddrss_sf_tbu_clk = true,
};

static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
	.ops = &ops_1_9_0,
	.has_ddrss_sf_tbu_clk = true,
	.pipe_clk_need_muxing = true,
	.has_aggre0_clk = true,
	.has_aggre1_clk = true,
};

static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
	.ops = &ops_1_9_0,
	.has_ddrss_sf_tbu_clk = true,
	.pipe_clk_need_muxing = true,
	.has_aggre1_clk = true,
};

static const struct qcom_pcie_cfg sc7280_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
	.pipe_clk_need_muxing = true,
};

static const struct qcom_pcie_cfg sc8180x_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
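/*
 * For reference, a minimal sketch of the DT wiring the probe path below
 * expects (resource names taken from the lookups in qcom_pcie_probe();
 * the node, addresses, and GPIO are illustrative, not a complete
 * binding example):
 *
 *	pcie@1c00000 {
 *		compatible = "qcom,pcie-sdm845";
 *		reg-names = "parf", "dbi", "elbi", "config";
 *		perst-gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>;
 *		phys = <&pcie0_lane>;
 *		phy-names = "pciephy";
 *	};
 *
 * "parf" and "elbi" are mapped here; "dbi" and "config" are consumed by
 * the DesignWare host core.
 */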
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	const struct qcom_pcie_cfg *pcie_cfg;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
	{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
	{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
	{ .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
	{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
	{ .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
	{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);