1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Qualcomm PCIe root complex driver 4 * 5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 6 * Copyright 2015 Linaro Limited. 7 * 8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com> 9 */ 10 11 #include <linux/clk.h> 12 #include <linux/crc8.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/iopoll.h> 18 #include <linux/kernel.h> 19 #include <linux/init.h> 20 #include <linux/of_device.h> 21 #include <linux/of_gpio.h> 22 #include <linux/pci.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/platform_device.h> 25 #include <linux/phy/phy.h> 26 #include <linux/regulator/consumer.h> 27 #include <linux/reset.h> 28 #include <linux/slab.h> 29 #include <linux/types.h> 30 31 #include "../../pci.h" 32 #include "pcie-designware.h" 33 34 #define PCIE20_PARF_SYS_CTRL 0x00 35 #define MST_WAKEUP_EN BIT(13) 36 #define SLV_WAKEUP_EN BIT(12) 37 #define MSTR_ACLK_CGC_DIS BIT(10) 38 #define SLV_ACLK_CGC_DIS BIT(9) 39 #define CORE_CLK_CGC_DIS BIT(6) 40 #define AUX_PWR_DET BIT(4) 41 #define L23_CLK_RMV_DIS BIT(2) 42 #define L1_CLK_RMV_DIS BIT(1) 43 44 #define PCIE20_PARF_PHY_CTRL 0x40 45 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) 46 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) 47 48 #define PCIE20_PARF_PHY_REFCLK 0x4C 49 #define PHY_REFCLK_SSP_EN BIT(16) 50 #define PHY_REFCLK_USE_PAD BIT(12) 51 52 #define PCIE20_PARF_DBI_BASE_ADDR 0x168 53 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C 54 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 55 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 56 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 57 #define PCIE20_PARF_LTSSM 0x1B0 58 #define PCIE20_PARF_SID_OFFSET 0x234 59 #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C 60 #define PCIE20_PARF_DEVICE_TYPE 0x1000 61 #define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000 62 63 #define PCIE20_ELBI_SYS_CTRL 0x04 64 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) 65 66 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 67 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 68 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 69 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c 70 #define CFG_BRIDGE_SB_INIT BIT(0) 71 72 #define PCIE_CAP_LINK1_VAL 0x2FD7F 73 74 #define PCIE20_PARF_Q2A_FLUSH 0x1AC 75 76 #define PCIE20_MISC_CONTROL_1_REG 0x8BC 77 #define DBI_RO_WR_EN 1 78 79 #define PERST_DELAY_US 1000 80 /* PARF registers */ 81 #define PCIE20_PARF_PCS_DEEMPH 0x34 82 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) 83 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) 84 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) 85 86 #define PCIE20_PARF_PCS_SWING 0x38 87 #define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) 88 #define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) 89 90 #define PCIE20_PARF_CONFIG_BITS 0x50 91 #define PHY_RX0_EQ(x) ((x) << 24) 92 93 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 94 #define SLV_ADDR_SPACE_SZ 0x10000000 95 96 #define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0 97 98 #define DEVICE_TYPE_RC 0x4 99 100 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 101 #define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 102 103 #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0)) 104 105 struct qcom_pcie_resources_2_1_0 { 106 struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; 107 struct reset_control *pci_reset; 108 struct reset_control *axi_reset; 109 struct reset_control *ahb_reset; 110 struct reset_control *por_reset; 111 struct reset_control *phy_reset; 112 struct reset_control *ext_reset; 113 struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; 114 }; 115 116 struct qcom_pcie_resources_1_0_0 { 117 struct clk *iface; 118 struct clk *aux; 119 struct clk *master_bus; 120 struct clk *slave_bus; 121 struct reset_control *core; 122 struct regulator *vdda; 123 }; 124 125 #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 126 struct qcom_pcie_resources_2_3_2 { 127 struct clk *aux_clk; 128 struct clk *master_clk; 129 struct clk *slave_clk; 130 struct clk *cfg_clk; 131 struct clk *pipe_clk; 132 struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; 133 }; 134 135 #define QCOM_PCIE_2_4_0_MAX_CLOCKS 4 136 struct qcom_pcie_resources_2_4_0 { 137 struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; 138 int num_clks; 139 struct reset_control *axi_m_reset; 140 struct reset_control *axi_s_reset; 141 struct reset_control *pipe_reset; 142 struct reset_control *axi_m_vmid_reset; 143 struct reset_control *axi_s_xpu_reset; 144 struct reset_control *parf_reset; 145 struct reset_control *phy_reset; 146 struct reset_control *axi_m_sticky_reset; 147 struct reset_control *pipe_sticky_reset; 148 struct reset_control *pwr_reset; 149 struct reset_control *ahb_reset; 150 struct reset_control *phy_ahb_reset; 151 }; 152 153 struct qcom_pcie_resources_2_3_3 { 154 struct clk *iface; 155 struct clk *axi_m_clk; 156 struct clk *axi_s_clk; 157 struct clk *ahb_clk; 158 struct clk *aux_clk; 159 struct reset_control *rst[7]; 160 }; 161 162 /* 6 clocks typically, 7 for sm8250 */ 163 struct qcom_pcie_resources_2_7_0 { 164 struct clk_bulk_data clks[7]; 165 int num_clks; 166 struct regulator_bulk_data supplies[2]; 167 struct reset_control *pci_reset; 168 struct clk *pipe_clk; 169 struct clk *pipe_clk_src; 170 struct clk *phy_pipe_clk; 171 struct clk *ref_clk_src; 172 }; 173 174 union qcom_pcie_resources { 175 struct qcom_pcie_resources_1_0_0 v1_0_0; 176 struct qcom_pcie_resources_2_1_0 v2_1_0; 177 struct qcom_pcie_resources_2_3_2 v2_3_2; 178 struct qcom_pcie_resources_2_3_3 v2_3_3; 179 struct qcom_pcie_resources_2_4_0 v2_4_0; 180 struct qcom_pcie_resources_2_7_0 v2_7_0; 181 }; 182 183 struct qcom_pcie; 184 185 struct qcom_pcie_ops { 186 int (*get_resources)(struct qcom_pcie *pcie); 187 int (*init)(struct qcom_pcie *pcie); 188 int (*post_init)(struct qcom_pcie *pcie); 189 void (*deinit)(struct qcom_pcie *pcie); 190 void (*post_deinit)(struct qcom_pcie *pcie); 191 void (*ltssm_enable)(struct qcom_pcie *pcie); 192 int (*config_sid)(struct qcom_pcie *pcie); 193 }; 194 195 struct qcom_pcie_cfg { 196 const struct qcom_pcie_ops *ops; 197 unsigned int pipe_clk_need_muxing:1; 198 }; 199 200 struct qcom_pcie { 201 struct dw_pcie *pci; 202 void __iomem *parf; /* DT parf */ 203 void __iomem *elbi; /* DT elbi */ 204 union qcom_pcie_resources res; 205 struct phy *phy; 206 struct gpio_desc *reset; 207 const struct qcom_pcie_ops *ops; 208 unsigned int pipe_clk_need_muxing:1; 209 }; 210 211 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) 212 213 static void qcom_ep_reset_assert(struct qcom_pcie *pcie) 214 { 215 gpiod_set_value_cansleep(pcie->reset, 1); 216 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 217 } 218 219 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) 220 { 221 /* Ensure that PERST has been asserted for at least 100 ms */ 222 msleep(100); 223 gpiod_set_value_cansleep(pcie->reset, 0); 224 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 225 } 226 227 static int qcom_pcie_start_link(struct dw_pcie *pci) 228 { 229 struct qcom_pcie *pcie = to_qcom_pcie(pci); 230 231 /* Enable Link Training state machine */ 232 if (pcie->ops->ltssm_enable) 233 pcie->ops->ltssm_enable(pcie); 234 235 return 0; 236 } 237 238 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) 239 { 240 u32 val; 241 242 /* enable link training */ 243 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); 244 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; 245 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); 246 } 247 248 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) 249 { 250 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 251 struct dw_pcie *pci = pcie->pci; 252 struct device *dev = pci->dev; 253 int ret; 254 255 res->supplies[0].supply = "vdda"; 256 res->supplies[1].supply = "vdda_phy"; 257 res->supplies[2].supply = "vdda_refclk"; 258 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 259 res->supplies); 260 if (ret) 261 return ret; 262 263 res->clks[0].id = "iface"; 264 res->clks[1].id = "core"; 265 res->clks[2].id = "phy"; 266 res->clks[3].id = "aux"; 267 res->clks[4].id = "ref"; 268 269 /* iface, core, phy are required */ 270 ret = devm_clk_bulk_get(dev, 3, res->clks); 271 if (ret < 0) 272 return ret; 273 274 /* aux, ref are optional */ 275 ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); 276 if (ret < 0) 277 return ret; 278 279 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); 280 if (IS_ERR(res->pci_reset)) 281 return PTR_ERR(res->pci_reset); 282 283 res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); 284 if (IS_ERR(res->axi_reset)) 285 return PTR_ERR(res->axi_reset); 286 287 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); 288 if (IS_ERR(res->ahb_reset)) 289 return PTR_ERR(res->ahb_reset); 290 291 res->por_reset = devm_reset_control_get_exclusive(dev, "por"); 292 if (IS_ERR(res->por_reset)) 293 return PTR_ERR(res->por_reset); 294 295 res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); 296 if (IS_ERR(res->ext_reset)) 297 return PTR_ERR(res->ext_reset); 298 299 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); 300 return PTR_ERR_OR_ZERO(res->phy_reset); 301 } 302 303 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) 304 { 305 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 306 307 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); 308 reset_control_assert(res->pci_reset); 309 reset_control_assert(res->axi_reset); 310 reset_control_assert(res->ahb_reset); 311 reset_control_assert(res->por_reset); 312 reset_control_assert(res->ext_reset); 313 reset_control_assert(res->phy_reset); 314 315 writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); 316 317 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 318 } 319 320 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) 321 { 322 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 323 struct dw_pcie *pci = pcie->pci; 324 struct device *dev = pci->dev; 325 struct device_node *node = dev->of_node; 326 u32 val; 327 int ret; 328 329 /* reset the PCIe interface as uboot can leave it undefined state */ 330 reset_control_assert(res->pci_reset); 331 reset_control_assert(res->axi_reset); 332 reset_control_assert(res->ahb_reset); 333 reset_control_assert(res->por_reset); 334 reset_control_assert(res->ext_reset); 335 reset_control_assert(res->phy_reset); 336 337 writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); 338 339 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 340 if (ret < 0) { 341 dev_err(dev, "cannot enable regulators\n"); 342 return ret; 343 } 344 345 ret = reset_control_deassert(res->ahb_reset); 346 if (ret) { 347 dev_err(dev, "cannot deassert ahb reset\n"); 348 goto err_deassert_ahb; 349 } 350 351 ret = reset_control_deassert(res->ext_reset); 352 if (ret) { 353 dev_err(dev, "cannot deassert ext reset\n"); 354 goto err_deassert_ext; 355 } 356 357 ret = reset_control_deassert(res->phy_reset); 358 if (ret) { 359 dev_err(dev, "cannot deassert phy reset\n"); 360 goto err_deassert_phy; 361 } 362 363 ret = reset_control_deassert(res->pci_reset); 364 if (ret) { 365 dev_err(dev, "cannot deassert pci reset\n"); 366 goto err_deassert_pci; 367 } 368 369 ret = reset_control_deassert(res->por_reset); 370 if (ret) { 371 dev_err(dev, "cannot deassert por reset\n"); 372 goto err_deassert_por; 373 } 374 375 ret = reset_control_deassert(res->axi_reset); 376 if (ret) { 377 dev_err(dev, "cannot deassert axi reset\n"); 378 goto err_deassert_axi; 379 } 380 381 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); 382 if (ret) 383 goto err_clks; 384 385 /* enable PCIe clocks and resets */ 386 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 387 val &= ~BIT(0); 388 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 389 390 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || 391 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { 392 writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | 393 PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | 394 PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), 395 pcie->parf + PCIE20_PARF_PCS_DEEMPH); 396 writel(PCS_SWING_TX_SWING_FULL(120) | 397 PCS_SWING_TX_SWING_LOW(120), 398 pcie->parf + PCIE20_PARF_PCS_SWING); 399 writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); 400 } 401 402 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { 403 /* set TX termination offset */ 404 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 405 val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; 406 val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); 407 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 408 } 409 410 /* enable external reference clock */ 411 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); 412 /* USE_PAD is required only for ipq806x */ 413 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) 414 val &= ~PHY_REFCLK_USE_PAD; 415 val |= PHY_REFCLK_SSP_EN; 416 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); 417 418 /* wait for clock acquisition */ 419 usleep_range(1000, 1500); 420 421 /* Set the Max TLP size to 2K, instead of using default of 4K */ 422 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, 423 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); 424 writel(CFG_BRIDGE_SB_INIT, 425 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); 426 427 return 0; 428 429 err_clks: 430 reset_control_assert(res->axi_reset); 431 err_deassert_axi: 432 reset_control_assert(res->por_reset); 433 err_deassert_por: 434 reset_control_assert(res->pci_reset); 435 err_deassert_pci: 436 reset_control_assert(res->phy_reset); 437 err_deassert_phy: 438 reset_control_assert(res->ext_reset); 439 err_deassert_ext: 440 reset_control_assert(res->ahb_reset); 441 err_deassert_ahb: 442 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 443 444 return ret; 445 } 446 447 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) 448 { 449 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 450 struct dw_pcie *pci = pcie->pci; 451 struct device *dev = pci->dev; 452 453 res->vdda = devm_regulator_get(dev, "vdda"); 454 if (IS_ERR(res->vdda)) 455 return PTR_ERR(res->vdda); 456 457 res->iface = devm_clk_get(dev, "iface"); 458 if (IS_ERR(res->iface)) 459 return PTR_ERR(res->iface); 460 461 res->aux = devm_clk_get(dev, "aux"); 462 if (IS_ERR(res->aux)) 463 return PTR_ERR(res->aux); 464 465 res->master_bus = devm_clk_get(dev, "master_bus"); 466 if (IS_ERR(res->master_bus)) 467 return PTR_ERR(res->master_bus); 468 469 res->slave_bus = devm_clk_get(dev, "slave_bus"); 470 if (IS_ERR(res->slave_bus)) 471 return PTR_ERR(res->slave_bus); 472 473 res->core = devm_reset_control_get_exclusive(dev, "core"); 474 return PTR_ERR_OR_ZERO(res->core); 475 } 476 477 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) 478 { 479 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 480 481 reset_control_assert(res->core); 482 clk_disable_unprepare(res->slave_bus); 483 clk_disable_unprepare(res->master_bus); 484 clk_disable_unprepare(res->iface); 485 clk_disable_unprepare(res->aux); 486 regulator_disable(res->vdda); 487 } 488 489 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) 490 { 491 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 492 struct dw_pcie *pci = pcie->pci; 493 struct device *dev = pci->dev; 494 int ret; 495 496 ret = reset_control_deassert(res->core); 497 if (ret) { 498 dev_err(dev, "cannot deassert core reset\n"); 499 return ret; 500 } 501 502 ret = clk_prepare_enable(res->aux); 503 if (ret) { 504 dev_err(dev, "cannot prepare/enable aux clock\n"); 505 goto err_res; 506 } 507 508 ret = clk_prepare_enable(res->iface); 509 if (ret) { 510 dev_err(dev, "cannot prepare/enable iface clock\n"); 511 goto err_aux; 512 } 513 514 ret = clk_prepare_enable(res->master_bus); 515 if (ret) { 516 dev_err(dev, "cannot prepare/enable master_bus clock\n"); 517 goto err_iface; 518 } 519 520 ret = clk_prepare_enable(res->slave_bus); 521 if (ret) { 522 dev_err(dev, "cannot prepare/enable slave_bus clock\n"); 523 goto err_master; 524 } 525 526 ret = regulator_enable(res->vdda); 527 if (ret) { 528 dev_err(dev, "cannot enable vdda regulator\n"); 529 goto err_slave; 530 } 531 532 /* change DBI base address */ 533 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 534 535 if (IS_ENABLED(CONFIG_PCI_MSI)) { 536 u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 537 538 val |= BIT(31); 539 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 540 } 541 542 return 0; 543 err_slave: 544 clk_disable_unprepare(res->slave_bus); 545 err_master: 546 clk_disable_unprepare(res->master_bus); 547 err_iface: 548 clk_disable_unprepare(res->iface); 549 err_aux: 550 clk_disable_unprepare(res->aux); 551 err_res: 552 reset_control_assert(res->core); 553 554 return ret; 555 } 556 557 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) 558 { 559 u32 val; 560 561 /* enable link training */ 562 val = readl(pcie->parf + PCIE20_PARF_LTSSM); 563 val |= BIT(8); 564 writel(val, pcie->parf + PCIE20_PARF_LTSSM); 565 } 566 567 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) 568 { 569 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 570 struct dw_pcie *pci = pcie->pci; 571 struct device *dev = pci->dev; 572 int ret; 573 574 res->supplies[0].supply = "vdda"; 575 res->supplies[1].supply = "vddpe-3v3"; 576 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 577 res->supplies); 578 if (ret) 579 return ret; 580 581 res->aux_clk = devm_clk_get(dev, "aux"); 582 if (IS_ERR(res->aux_clk)) 583 return PTR_ERR(res->aux_clk); 584 585 res->cfg_clk = devm_clk_get(dev, "cfg"); 586 if (IS_ERR(res->cfg_clk)) 587 return PTR_ERR(res->cfg_clk); 588 589 res->master_clk = devm_clk_get(dev, "bus_master"); 590 if (IS_ERR(res->master_clk)) 591 return PTR_ERR(res->master_clk); 592 593 res->slave_clk = devm_clk_get(dev, "bus_slave"); 594 if (IS_ERR(res->slave_clk)) 595 return PTR_ERR(res->slave_clk); 596 597 res->pipe_clk = devm_clk_get(dev, "pipe"); 598 return PTR_ERR_OR_ZERO(res->pipe_clk); 599 } 600 601 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) 602 { 603 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 604 605 clk_disable_unprepare(res->slave_clk); 606 clk_disable_unprepare(res->master_clk); 607 clk_disable_unprepare(res->cfg_clk); 608 clk_disable_unprepare(res->aux_clk); 609 610 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 611 } 612 613 static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) 614 { 615 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 616 617 clk_disable_unprepare(res->pipe_clk); 618 } 619 620 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) 621 { 622 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 623 struct dw_pcie *pci = pcie->pci; 624 struct device *dev = pci->dev; 625 u32 val; 626 int ret; 627 628 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 629 if (ret < 0) { 630 dev_err(dev, "cannot enable regulators\n"); 631 return ret; 632 } 633 634 ret = clk_prepare_enable(res->aux_clk); 635 if (ret) { 636 dev_err(dev, "cannot prepare/enable aux clock\n"); 637 goto err_aux_clk; 638 } 639 640 ret = clk_prepare_enable(res->cfg_clk); 641 if (ret) { 642 dev_err(dev, "cannot prepare/enable cfg clock\n"); 643 goto err_cfg_clk; 644 } 645 646 ret = clk_prepare_enable(res->master_clk); 647 if (ret) { 648 dev_err(dev, "cannot prepare/enable master clock\n"); 649 goto err_master_clk; 650 } 651 652 ret = clk_prepare_enable(res->slave_clk); 653 if (ret) { 654 dev_err(dev, "cannot prepare/enable slave clock\n"); 655 goto err_slave_clk; 656 } 657 658 /* enable PCIe clocks and resets */ 659 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 660 val &= ~BIT(0); 661 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 662 663 /* change DBI base address */ 664 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 665 666 /* MAC PHY_POWERDOWN MUX DISABLE */ 667 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 668 val &= ~BIT(29); 669 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 670 671 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 672 val |= BIT(4); 673 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 674 675 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 676 val |= BIT(31); 677 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 678 679 return 0; 680 681 err_slave_clk: 682 clk_disable_unprepare(res->master_clk); 683 err_master_clk: 684 clk_disable_unprepare(res->cfg_clk); 685 err_cfg_clk: 686 clk_disable_unprepare(res->aux_clk); 687 688 err_aux_clk: 689 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 690 691 return ret; 692 } 693 694 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) 695 { 696 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 697 struct dw_pcie *pci = pcie->pci; 698 struct device *dev = pci->dev; 699 int ret; 700 701 ret = clk_prepare_enable(res->pipe_clk); 702 if (ret) { 703 dev_err(dev, "cannot prepare/enable pipe clock\n"); 704 return ret; 705 } 706 707 return 0; 708 } 709 710 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) 711 { 712 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 713 struct dw_pcie *pci = pcie->pci; 714 struct device *dev = pci->dev; 715 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); 716 int ret; 717 718 res->clks[0].id = "aux"; 719 res->clks[1].id = "master_bus"; 720 res->clks[2].id = "slave_bus"; 721 res->clks[3].id = "iface"; 722 723 /* qcom,pcie-ipq4019 is defined without "iface" */ 724 res->num_clks = is_ipq ? 3 : 4; 725 726 ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); 727 if (ret < 0) 728 return ret; 729 730 res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); 731 if (IS_ERR(res->axi_m_reset)) 732 return PTR_ERR(res->axi_m_reset); 733 734 res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); 735 if (IS_ERR(res->axi_s_reset)) 736 return PTR_ERR(res->axi_s_reset); 737 738 if (is_ipq) { 739 /* 740 * These resources relates to the PHY or are secure clocks, but 741 * are controlled here for IPQ4019 742 */ 743 res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); 744 if (IS_ERR(res->pipe_reset)) 745 return PTR_ERR(res->pipe_reset); 746 747 res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, 748 "axi_m_vmid"); 749 if (IS_ERR(res->axi_m_vmid_reset)) 750 return PTR_ERR(res->axi_m_vmid_reset); 751 752 res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, 753 "axi_s_xpu"); 754 if (IS_ERR(res->axi_s_xpu_reset)) 755 return PTR_ERR(res->axi_s_xpu_reset); 756 757 res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); 758 if (IS_ERR(res->parf_reset)) 759 return PTR_ERR(res->parf_reset); 760 761 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); 762 if (IS_ERR(res->phy_reset)) 763 return PTR_ERR(res->phy_reset); 764 } 765 766 res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, 767 "axi_m_sticky"); 768 if (IS_ERR(res->axi_m_sticky_reset)) 769 return PTR_ERR(res->axi_m_sticky_reset); 770 771 res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, 772 "pipe_sticky"); 773 if (IS_ERR(res->pipe_sticky_reset)) 774 return PTR_ERR(res->pipe_sticky_reset); 775 776 res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); 777 if (IS_ERR(res->pwr_reset)) 778 return PTR_ERR(res->pwr_reset); 779 780 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); 781 if (IS_ERR(res->ahb_reset)) 782 return PTR_ERR(res->ahb_reset); 783 784 if (is_ipq) { 785 res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); 786 if (IS_ERR(res->phy_ahb_reset)) 787 return PTR_ERR(res->phy_ahb_reset); 788 } 789 790 return 0; 791 } 792 793 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) 794 { 795 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 796 797 reset_control_assert(res->axi_m_reset); 798 reset_control_assert(res->axi_s_reset); 799 reset_control_assert(res->pipe_reset); 800 reset_control_assert(res->pipe_sticky_reset); 801 reset_control_assert(res->phy_reset); 802 reset_control_assert(res->phy_ahb_reset); 803 reset_control_assert(res->axi_m_sticky_reset); 804 reset_control_assert(res->pwr_reset); 805 reset_control_assert(res->ahb_reset); 806 clk_bulk_disable_unprepare(res->num_clks, res->clks); 807 } 808 809 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) 810 { 811 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 812 struct dw_pcie *pci = pcie->pci; 813 struct device *dev = pci->dev; 814 u32 val; 815 int ret; 816 817 ret = reset_control_assert(res->axi_m_reset); 818 if (ret) { 819 dev_err(dev, "cannot assert axi master reset\n"); 820 return ret; 821 } 822 823 ret = reset_control_assert(res->axi_s_reset); 824 if (ret) { 825 dev_err(dev, "cannot assert axi slave reset\n"); 826 return ret; 827 } 828 829 usleep_range(10000, 12000); 830 831 ret = reset_control_assert(res->pipe_reset); 832 if (ret) { 833 dev_err(dev, "cannot assert pipe reset\n"); 834 return ret; 835 } 836 837 ret = reset_control_assert(res->pipe_sticky_reset); 838 if (ret) { 839 dev_err(dev, "cannot assert pipe sticky reset\n"); 840 return ret; 841 } 842 843 ret = reset_control_assert(res->phy_reset); 844 if (ret) { 845 dev_err(dev, "cannot assert phy reset\n"); 846 return ret; 847 } 848 849 ret = reset_control_assert(res->phy_ahb_reset); 850 if (ret) { 851 dev_err(dev, "cannot assert phy ahb reset\n"); 852 return ret; 853 } 854 855 usleep_range(10000, 12000); 856 857 ret = reset_control_assert(res->axi_m_sticky_reset); 858 if (ret) { 859 dev_err(dev, "cannot assert axi master sticky reset\n"); 860 return ret; 861 } 862 863 ret = reset_control_assert(res->pwr_reset); 864 if (ret) { 865 dev_err(dev, "cannot assert power reset\n"); 866 return ret; 867 } 868 869 ret = reset_control_assert(res->ahb_reset); 870 if (ret) { 871 dev_err(dev, "cannot assert ahb reset\n"); 872 return ret; 873 } 874 875 usleep_range(10000, 12000); 876 877 ret = reset_control_deassert(res->phy_ahb_reset); 878 if (ret) { 879 dev_err(dev, "cannot deassert phy ahb reset\n"); 880 return ret; 881 } 882 883 ret = reset_control_deassert(res->phy_reset); 884 if (ret) { 885 dev_err(dev, "cannot deassert phy reset\n"); 886 goto err_rst_phy; 887 } 888 889 ret = reset_control_deassert(res->pipe_reset); 890 if (ret) { 891 dev_err(dev, "cannot deassert pipe reset\n"); 892 goto err_rst_pipe; 893 } 894 895 ret = reset_control_deassert(res->pipe_sticky_reset); 896 if (ret) { 897 dev_err(dev, "cannot deassert pipe sticky reset\n"); 898 goto err_rst_pipe_sticky; 899 } 900 901 usleep_range(10000, 12000); 902 903 ret = reset_control_deassert(res->axi_m_reset); 904 if (ret) { 905 dev_err(dev, "cannot deassert axi master reset\n"); 906 goto err_rst_axi_m; 907 } 908 909 ret = reset_control_deassert(res->axi_m_sticky_reset); 910 if (ret) { 911 dev_err(dev, "cannot deassert axi master sticky reset\n"); 912 goto err_rst_axi_m_sticky; 913 } 914 915 ret = reset_control_deassert(res->axi_s_reset); 916 if (ret) { 917 dev_err(dev, "cannot deassert axi slave reset\n"); 918 goto err_rst_axi_s; 919 } 920 921 ret = reset_control_deassert(res->pwr_reset); 922 if (ret) { 923 dev_err(dev, "cannot deassert power reset\n"); 924 goto err_rst_pwr; 925 } 926 927 ret = reset_control_deassert(res->ahb_reset); 928 if (ret) { 929 dev_err(dev, "cannot deassert ahb reset\n"); 930 goto err_rst_ahb; 931 } 932 933 usleep_range(10000, 12000); 934 935 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 936 if (ret) 937 goto err_clks; 938 939 /* enable PCIe clocks and resets */ 940 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 941 val &= ~BIT(0); 942 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 943 944 /* change DBI base address */ 945 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 946 947 /* MAC PHY_POWERDOWN MUX DISABLE */ 948 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 949 val &= ~BIT(29); 950 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 951 952 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 953 val |= BIT(4); 954 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 955 956 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 957 val |= BIT(31); 958 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 959 960 return 0; 961 962 err_clks: 963 reset_control_assert(res->ahb_reset); 964 err_rst_ahb: 965 reset_control_assert(res->pwr_reset); 966 err_rst_pwr: 967 reset_control_assert(res->axi_s_reset); 968 err_rst_axi_s: 969 reset_control_assert(res->axi_m_sticky_reset); 970 err_rst_axi_m_sticky: 971 reset_control_assert(res->axi_m_reset); 972 err_rst_axi_m: 973 reset_control_assert(res->pipe_sticky_reset); 974 err_rst_pipe_sticky: 975 reset_control_assert(res->pipe_reset); 976 err_rst_pipe: 977 reset_control_assert(res->phy_reset); 978 err_rst_phy: 979 reset_control_assert(res->phy_ahb_reset); 980 return ret; 981 } 982 983 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) 984 { 985 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 986 struct dw_pcie *pci = pcie->pci; 987 struct device *dev = pci->dev; 988 int i; 989 const char *rst_names[] = { "axi_m", "axi_s", "pipe", 990 "axi_m_sticky", "sticky", 991 "ahb", "sleep", }; 992 993 res->iface = devm_clk_get(dev, "iface"); 994 if (IS_ERR(res->iface)) 995 return PTR_ERR(res->iface); 996 997 res->axi_m_clk = devm_clk_get(dev, "axi_m"); 998 if (IS_ERR(res->axi_m_clk)) 999 return PTR_ERR(res->axi_m_clk); 1000 1001 res->axi_s_clk = devm_clk_get(dev, "axi_s"); 1002 if (IS_ERR(res->axi_s_clk)) 1003 return PTR_ERR(res->axi_s_clk); 1004 1005 res->ahb_clk = devm_clk_get(dev, "ahb"); 1006 if (IS_ERR(res->ahb_clk)) 1007 return PTR_ERR(res->ahb_clk); 1008 1009 res->aux_clk = devm_clk_get(dev, "aux"); 1010 if (IS_ERR(res->aux_clk)) 1011 return PTR_ERR(res->aux_clk); 1012 1013 for (i = 0; i < ARRAY_SIZE(rst_names); i++) { 1014 res->rst[i] = devm_reset_control_get(dev, rst_names[i]); 1015 if (IS_ERR(res->rst[i])) 1016 return PTR_ERR(res->rst[i]); 1017 } 1018 1019 return 0; 1020 } 1021 1022 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) 1023 { 1024 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1025 1026 clk_disable_unprepare(res->iface); 1027 clk_disable_unprepare(res->axi_m_clk); 1028 clk_disable_unprepare(res->axi_s_clk); 1029 clk_disable_unprepare(res->ahb_clk); 1030 clk_disable_unprepare(res->aux_clk); 1031 } 1032 1033 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) 1034 { 1035 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1036 struct dw_pcie *pci = pcie->pci; 1037 struct device *dev = pci->dev; 1038 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1039 int i, ret; 1040 u32 val; 1041 1042 for (i = 0; i < ARRAY_SIZE(res->rst); i++) { 1043 ret = reset_control_assert(res->rst[i]); 1044 if (ret) { 1045 dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); 1046 return ret; 1047 } 1048 } 1049 1050 usleep_range(2000, 2500); 1051 1052 for (i = 0; i < ARRAY_SIZE(res->rst); i++) { 1053 ret = reset_control_deassert(res->rst[i]); 1054 if (ret) { 1055 dev_err(dev, "reset #%d deassert failed (%d)\n", i, 1056 ret); 1057 return ret; 1058 } 1059 } 1060 1061 /* 1062 * Don't have a way to see if the reset has completed. 1063 * Wait for some time. 1064 */ 1065 usleep_range(2000, 2500); 1066 1067 ret = clk_prepare_enable(res->iface); 1068 if (ret) { 1069 dev_err(dev, "cannot prepare/enable core clock\n"); 1070 goto err_clk_iface; 1071 } 1072 1073 ret = clk_prepare_enable(res->axi_m_clk); 1074 if (ret) { 1075 dev_err(dev, "cannot prepare/enable core clock\n"); 1076 goto err_clk_axi_m; 1077 } 1078 1079 ret = clk_prepare_enable(res->axi_s_clk); 1080 if (ret) { 1081 dev_err(dev, "cannot prepare/enable axi slave clock\n"); 1082 goto err_clk_axi_s; 1083 } 1084 1085 ret = clk_prepare_enable(res->ahb_clk); 1086 if (ret) { 1087 dev_err(dev, "cannot prepare/enable ahb clock\n"); 1088 goto err_clk_ahb; 1089 } 1090 1091 ret = clk_prepare_enable(res->aux_clk); 1092 if (ret) { 1093 dev_err(dev, "cannot prepare/enable aux clock\n"); 1094 goto err_clk_aux; 1095 } 1096 1097 writel(SLV_ADDR_SPACE_SZ, 1098 pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); 1099 1100 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1101 val &= ~BIT(0); 1102 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1103 1104 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1105 1106 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS 1107 | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1108 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1109 pcie->parf + PCIE20_PARF_SYS_CTRL); 1110 writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); 1111 1112 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); 1113 writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); 1114 writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1115 1116 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1117 val &= ~PCI_EXP_LNKCAP_ASPMS; 1118 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1119 1120 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1121 PCI_EXP_DEVCTL2); 1122 1123 return 0; 1124 1125 err_clk_aux: 1126 clk_disable_unprepare(res->ahb_clk); 1127 err_clk_ahb: 1128 clk_disable_unprepare(res->axi_s_clk); 1129 err_clk_axi_s: 1130 clk_disable_unprepare(res->axi_m_clk); 1131 err_clk_axi_m: 1132 clk_disable_unprepare(res->iface); 1133 err_clk_iface: 1134 /* 1135 * Not checking for failure, will anyway return 1136 * the original failure in 'ret'. 1137 */ 1138 for (i = 0; i < ARRAY_SIZE(res->rst); i++) 1139 reset_control_assert(res->rst[i]); 1140 1141 return ret; 1142 } 1143 1144 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) 1145 { 1146 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1147 struct dw_pcie *pci = pcie->pci; 1148 struct device *dev = pci->dev; 1149 int ret; 1150 1151 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); 1152 if (IS_ERR(res->pci_reset)) 1153 return PTR_ERR(res->pci_reset); 1154 1155 res->supplies[0].supply = "vdda"; 1156 res->supplies[1].supply = "vddpe-3v3"; 1157 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 1158 res->supplies); 1159 if (ret) 1160 return ret; 1161 1162 res->clks[0].id = "aux"; 1163 res->clks[1].id = "cfg"; 1164 res->clks[2].id = "bus_master"; 1165 res->clks[3].id = "bus_slave"; 1166 res->clks[4].id = "slave_q2a"; 1167 res->clks[5].id = "tbu"; 1168 if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) { 1169 res->clks[6].id = "ddrss_sf_tbu"; 1170 res->num_clks = 7; 1171 } else { 1172 res->num_clks = 6; 1173 } 1174 1175 ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); 1176 if (ret < 0) 1177 return ret; 1178 1179 if (pcie->pipe_clk_need_muxing) { 1180 res->pipe_clk_src = devm_clk_get(dev, "pipe_mux"); 1181 if (IS_ERR(res->pipe_clk_src)) 1182 return PTR_ERR(res->pipe_clk_src); 1183 1184 res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe"); 1185 if (IS_ERR(res->phy_pipe_clk)) 1186 return PTR_ERR(res->phy_pipe_clk); 1187 1188 res->ref_clk_src = devm_clk_get(dev, "ref"); 1189 if (IS_ERR(res->ref_clk_src)) 1190 return PTR_ERR(res->ref_clk_src); 1191 } 1192 1193 res->pipe_clk = devm_clk_get(dev, "pipe"); 1194 return PTR_ERR_OR_ZERO(res->pipe_clk); 1195 } 1196 1197 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) 1198 { 1199 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1200 struct dw_pcie *pci = pcie->pci; 1201 struct device *dev = pci->dev; 1202 u32 val; 1203 int ret; 1204 1205 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 1206 if (ret < 0) { 1207 dev_err(dev, "cannot enable regulators\n"); 1208 return ret; 1209 } 1210 1211 /* Set TCXO as clock source for pcie_pipe_clk_src */ 1212 if (pcie->pipe_clk_need_muxing) 1213 clk_set_parent(res->pipe_clk_src, res->ref_clk_src); 1214 1215 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 1216 if (ret < 0) 1217 goto err_disable_regulators; 1218 1219 ret = reset_control_assert(res->pci_reset); 1220 if (ret < 0) { 1221 dev_err(dev, "cannot deassert pci reset\n"); 1222 goto err_disable_clocks; 1223 } 1224 1225 usleep_range(1000, 1500); 1226 1227 ret = reset_control_deassert(res->pci_reset); 1228 if (ret < 0) { 1229 dev_err(dev, "cannot deassert pci reset\n"); 1230 goto err_disable_clocks; 1231 } 1232 1233 ret = clk_prepare_enable(res->pipe_clk); 1234 if (ret) { 1235 dev_err(dev, "cannot prepare/enable pipe clock\n"); 1236 goto err_disable_clocks; 1237 } 1238 1239 /* configure PCIe to RC mode */ 1240 writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); 1241 1242 /* enable PCIe clocks and resets */ 1243 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1244 val &= ~BIT(0); 1245 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1246 1247 /* change DBI base address */ 1248 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1249 1250 /* MAC PHY_POWERDOWN MUX DISABLE */ 1251 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 1252 val &= ~BIT(29); 1253 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 1254 1255 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1256 val |= BIT(4); 1257 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1258 1259 if (IS_ENABLED(CONFIG_PCI_MSI)) { 1260 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 1261 val |= BIT(31); 1262 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 1263 } 1264 1265 return 0; 1266 err_disable_clocks: 1267 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1268 err_disable_regulators: 1269 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1270 1271 return ret; 1272 } 1273 1274 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 1275 { 1276 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1277 1278 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1279 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1280 } 1281 1282 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) 1283 { 1284 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1285 1286 /* Set pipe clock as clock source for pcie_pipe_clk_src */ 1287 if (pcie->pipe_clk_need_muxing) 1288 clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk); 1289 1290 return clk_prepare_enable(res->pipe_clk); 1291 } 1292 1293 static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie) 1294 { 1295 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1296 1297 clk_disable_unprepare(res->pipe_clk); 1298 } 1299 1300 static int qcom_pcie_link_up(struct dw_pcie *pci) 1301 { 1302 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1303 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1304 1305 return !!(val & PCI_EXP_LNKSTA_DLLLA); 1306 } 1307 1308 static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) 1309 { 1310 /* iommu map structure */ 1311 struct { 1312 u32 bdf; 1313 u32 phandle; 1314 u32 smmu_sid; 1315 u32 smmu_sid_len; 1316 } *map; 1317 void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N; 1318 struct device *dev = pcie->pci->dev; 1319 u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; 1320 int i, nr_map, size = 0; 1321 u32 smmu_sid_base; 1322 1323 of_get_property(dev->of_node, "iommu-map", &size); 1324 if (!size) 1325 return 0; 1326 1327 map = kzalloc(size, GFP_KERNEL); 1328 if (!map) 1329 return -ENOMEM; 1330 1331 of_property_read_u32_array(dev->of_node, 1332 "iommu-map", (u32 *)map, size / sizeof(u32)); 1333 1334 nr_map = size / (sizeof(*map)); 1335 1336 crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); 1337 1338 /* Registers need to be zero out first */ 1339 memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); 1340 1341 /* Extract the SMMU SID base from the first entry of iommu-map */ 1342 smmu_sid_base = map[0].smmu_sid; 1343 1344 /* Look for an available entry to hold the mapping */ 1345 for (i = 0; i < nr_map; i++) { 1346 __be16 bdf_be = cpu_to_be16(map[i].bdf); 1347 u32 val; 1348 u8 hash; 1349 1350 hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 1351 0); 1352 1353 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1354 1355 /* If the register is already populated, look for next available entry */ 1356 while (val) { 1357 u8 current_hash = hash++; 1358 u8 next_mask = 0xff; 1359 1360 /* If NEXT field is NULL then update it with next hash */ 1361 if (!(val & next_mask)) { 1362 val |= (u32)hash; 1363 writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); 1364 } 1365 1366 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1367 } 1368 1369 /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ 1370 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; 1371 writel(val, bdf_to_sid_base + hash * sizeof(u32)); 1372 } 1373 1374 kfree(map); 1375 1376 return 0; 1377 } 1378 1379 static int qcom_pcie_host_init(struct pcie_port *pp) 1380 { 1381 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1382 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1383 int ret; 1384 1385 qcom_ep_reset_assert(pcie); 1386 1387 ret = pcie->ops->init(pcie); 1388 if (ret) 1389 return ret; 1390 1391 ret = phy_power_on(pcie->phy); 1392 if (ret) 1393 goto err_deinit; 1394 1395 if (pcie->ops->post_init) { 1396 ret = pcie->ops->post_init(pcie); 1397 if (ret) 1398 goto err_disable_phy; 1399 } 1400 1401 qcom_ep_reset_deassert(pcie); 1402 1403 if (pcie->ops->config_sid) { 1404 ret = pcie->ops->config_sid(pcie); 1405 if (ret) 1406 goto err; 1407 } 1408 1409 return 0; 1410 1411 err: 1412 qcom_ep_reset_assert(pcie); 1413 if (pcie->ops->post_deinit) 1414 pcie->ops->post_deinit(pcie); 1415 err_disable_phy: 1416 phy_power_off(pcie->phy); 1417 err_deinit: 1418 pcie->ops->deinit(pcie); 1419 1420 return ret; 1421 } 1422 1423 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { 1424 .host_init = qcom_pcie_host_init, 1425 }; 1426 1427 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 1428 static const struct qcom_pcie_ops ops_2_1_0 = { 1429 .get_resources = qcom_pcie_get_resources_2_1_0, 1430 .init = qcom_pcie_init_2_1_0, 1431 .deinit = qcom_pcie_deinit_2_1_0, 1432 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1433 }; 1434 1435 /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ 1436 static const struct qcom_pcie_ops ops_1_0_0 = { 1437 .get_resources = qcom_pcie_get_resources_1_0_0, 1438 .init = qcom_pcie_init_1_0_0, 1439 .deinit = qcom_pcie_deinit_1_0_0, 1440 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1441 }; 1442 1443 /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ 1444 static const struct qcom_pcie_ops ops_2_3_2 = { 1445 .get_resources = qcom_pcie_get_resources_2_3_2, 1446 .init = qcom_pcie_init_2_3_2, 1447 .post_init = qcom_pcie_post_init_2_3_2, 1448 .deinit = qcom_pcie_deinit_2_3_2, 1449 .post_deinit = qcom_pcie_post_deinit_2_3_2, 1450 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1451 }; 1452 1453 /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ 1454 static const struct qcom_pcie_ops ops_2_4_0 = { 1455 .get_resources = qcom_pcie_get_resources_2_4_0, 1456 .init = qcom_pcie_init_2_4_0, 1457 .deinit = qcom_pcie_deinit_2_4_0, 1458 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1459 }; 1460 1461 /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ 1462 static const struct qcom_pcie_ops ops_2_3_3 = { 1463 .get_resources = qcom_pcie_get_resources_2_3_3, 1464 .init = qcom_pcie_init_2_3_3, 1465 .deinit = qcom_pcie_deinit_2_3_3, 1466 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1467 }; 1468 1469 /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */ 1470 static const struct qcom_pcie_ops ops_2_7_0 = { 1471 .get_resources = qcom_pcie_get_resources_2_7_0, 1472 .init = qcom_pcie_init_2_7_0, 1473 .deinit = qcom_pcie_deinit_2_7_0, 1474 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1475 .post_init = qcom_pcie_post_init_2_7_0, 1476 .post_deinit = qcom_pcie_post_deinit_2_7_0, 1477 }; 1478 1479 /* Qcom IP rev.: 1.9.0 */ 1480 static const struct qcom_pcie_ops ops_1_9_0 = { 1481 .get_resources = qcom_pcie_get_resources_2_7_0, 1482 .init = qcom_pcie_init_2_7_0, 1483 .deinit = qcom_pcie_deinit_2_7_0, 1484 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1485 .post_init = qcom_pcie_post_init_2_7_0, 1486 .post_deinit = qcom_pcie_post_deinit_2_7_0, 1487 .config_sid = qcom_pcie_config_sid_sm8250, 1488 }; 1489 1490 static const struct qcom_pcie_cfg apq8084_cfg = { 1491 .ops = &ops_1_0_0, 1492 }; 1493 1494 static const struct qcom_pcie_cfg ipq8064_cfg = { 1495 .ops = &ops_2_1_0, 1496 }; 1497 1498 static const struct qcom_pcie_cfg msm8996_cfg = { 1499 .ops = &ops_2_3_2, 1500 }; 1501 1502 static const struct qcom_pcie_cfg ipq8074_cfg = { 1503 .ops = &ops_2_3_3, 1504 }; 1505 1506 static const struct qcom_pcie_cfg ipq4019_cfg = { 1507 .ops = &ops_2_4_0, 1508 }; 1509 1510 static const struct qcom_pcie_cfg sdm845_cfg = { 1511 .ops = &ops_2_7_0, 1512 }; 1513 1514 static const struct qcom_pcie_cfg sm8250_cfg = { 1515 .ops = &ops_1_9_0, 1516 }; 1517 1518 static const struct qcom_pcie_cfg sc7280_cfg = { 1519 .ops = &ops_1_9_0, 1520 .pipe_clk_need_muxing = true, 1521 }; 1522 1523 static const struct dw_pcie_ops dw_pcie_ops = { 1524 .link_up = qcom_pcie_link_up, 1525 .start_link = qcom_pcie_start_link, 1526 }; 1527 1528 static int qcom_pcie_probe(struct platform_device *pdev) 1529 { 1530 struct device *dev = &pdev->dev; 1531 struct pcie_port *pp; 1532 struct dw_pcie *pci; 1533 struct qcom_pcie *pcie; 1534 const struct qcom_pcie_cfg *pcie_cfg; 1535 int ret; 1536 1537 pcie_cfg = of_device_get_match_data(dev); 1538 if (!pcie_cfg || !pcie_cfg->ops) { 1539 dev_err(dev, "Invalid platform data\n"); 1540 return -EINVAL; 1541 } 1542 1543 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 1544 if (!pcie) 1545 return -ENOMEM; 1546 1547 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1548 if (!pci) 1549 return -ENOMEM; 1550 1551 pm_runtime_enable(dev); 1552 ret = pm_runtime_get_sync(dev); 1553 if (ret < 0) 1554 goto err_pm_runtime_put; 1555 1556 pci->dev = dev; 1557 pci->ops = &dw_pcie_ops; 1558 pp = &pci->pp; 1559 1560 pcie->pci = pci; 1561 1562 pcie->ops = pcie_cfg->ops; 1563 pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing; 1564 1565 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); 1566 if (IS_ERR(pcie->reset)) { 1567 ret = PTR_ERR(pcie->reset); 1568 goto err_pm_runtime_put; 1569 } 1570 1571 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); 1572 if (IS_ERR(pcie->parf)) { 1573 ret = PTR_ERR(pcie->parf); 1574 goto err_pm_runtime_put; 1575 } 1576 1577 pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); 1578 if (IS_ERR(pcie->elbi)) { 1579 ret = PTR_ERR(pcie->elbi); 1580 goto err_pm_runtime_put; 1581 } 1582 1583 pcie->phy = devm_phy_optional_get(dev, "pciephy"); 1584 if (IS_ERR(pcie->phy)) { 1585 ret = PTR_ERR(pcie->phy); 1586 goto err_pm_runtime_put; 1587 } 1588 1589 ret = pcie->ops->get_resources(pcie); 1590 if (ret) 1591 goto err_pm_runtime_put; 1592 1593 pp->ops = &qcom_pcie_dw_ops; 1594 1595 ret = phy_init(pcie->phy); 1596 if (ret) { 1597 pm_runtime_disable(&pdev->dev); 1598 goto err_pm_runtime_put; 1599 } 1600 1601 platform_set_drvdata(pdev, pcie); 1602 1603 ret = dw_pcie_host_init(pp); 1604 if (ret) { 1605 dev_err(dev, "cannot initialize host\n"); 1606 pm_runtime_disable(&pdev->dev); 1607 goto err_pm_runtime_put; 1608 } 1609 1610 return 0; 1611 1612 err_pm_runtime_put: 1613 pm_runtime_put(dev); 1614 pm_runtime_disable(dev); 1615 1616 return ret; 1617 } 1618 1619 static const struct of_device_id qcom_pcie_match[] = { 1620 { .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg }, 1621 { .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg }, 1622 { .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg }, 1623 { .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg }, 1624 { .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg }, 1625 { .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg }, 1626 { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg }, 1627 { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg }, 1628 { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg }, 1629 { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg }, 1630 { .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg }, 1631 { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg }, 1632 { } 1633 }; 1634 1635 static void qcom_fixup_class(struct pci_dev *dev) 1636 { 1637 dev->class = PCI_CLASS_BRIDGE_PCI << 8; 1638 } 1639 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); 1640 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); 1641 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); 1642 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); 1643 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); 1644 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 1645 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); 1646 1647 static struct platform_driver qcom_pcie_driver = { 1648 .probe = qcom_pcie_probe, 1649 .driver = { 1650 .name = "qcom-pcie", 1651 .suppress_bind_attrs = true, 1652 .of_match_table = qcom_pcie_match, 1653 }, 1654 }; 1655 builtin_platform_driver(qcom_pcie_driver); 1656