// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PM_CTRL			0x20
#define REQ_NOT_ENTR_L1				BIT(5)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000
#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
						250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
						1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_HPC | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5

#define QCOM_PCIE_CRC8_POLYNOMIAL	(BIT(2) | BIT(1) | BIT(0))
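
/*
 * Each qcom_pcie_resources_* struct below bundles the clocks, resets and
 * regulators required by one revision of the Qualcomm PCIe controller IP.
 * The per-SoC match data selects which set is actually used.
 */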
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[12];
	int num_clks;
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[5];
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;
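
/*
 * Per-revision callbacks: get_resources() runs once at probe time, init()
 * and post_init() bracket PHY power-on during host init, deinit() unwinds
 * init(), ltssm_enable() kicks off link training, and config_sid() (where
 * implemented) programs the BDF to SMMU SID mapping.
 */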
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct icc_path *icc_mem;
	const struct qcom_pcie_cfg *cfg;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* reset the PCIe interface as uboot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	return 0;

err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
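
/*
 * Post-init enables the PHY and its reference clock and limits the
 * bridge's max TLP size; the PCS de-emphasis, TX swing and RX EQ
 * overrides below apply only to the IPQ8064 family.
 */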
static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
}
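
/*
 * IP rev 2.3.2 and later start link training through the PARF LTSSM
 * register instead of the ELBI block used by rev 2.1.0/1.0.0.
 */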
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
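
/*
 * Park the controller in a known state by asserting every reset (with
 * settle delays in between) before deasserting them in the order the
 * hardware requires.
 */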
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}
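
/*
 * DBI registers are read-only by default: DBI_RO_WR_EN must be set in
 * MISC_CONTROL_1 before the slot and link capabilities can be rewritten.
 * PCIE_CAP_SLOT_VAL advertises a 25 W slot power limit (SPLV = 250 with
 * the 0.1x SPLS scale).
 */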
static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int num_clks, num_opt_clks;
	unsigned int idx;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";

	num_clks = idx;

	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->clks[idx++].id = "tbu";
	res->clks[idx++].id = "ddrss_sf_tbu";
	res->clks[idx++].id = "aggre0";
	res->clks[idx++].id = "aggre1";
	res->clks[idx++].id = "noc_aggr_4";
	res->clks[idx++].id = "noc_aggr_south_sf";
	res->clks[idx++].id = "cnoc_qx";

	num_opt_clks = idx - num_clks;
	res->num_clks = idx;

	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
	if (ret < 0)
		return ret;

	return 0;
}
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
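
/*
 * IP rev 2.9.0 (IPQ6018) exposes all of its resets through one collapsed
 * reset array and five named clocks.
 */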
static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "axi_bridge";
	res->clks[4].id = "rchng";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
	       pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
	       GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
	       pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
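
/*
 * The PARF BDF_TO_SID table has 256 slots, indexed by the CRC8 (poly 0x7)
 * of the big-endian BDF. Each 32-bit slot packs BDF[31:16], the SID
 * offset in [15:8] and a NEXT[7:0] link that chains to another slot on a
 * hash collision.
 */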
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
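
/*
 * Common host bring-up: hold PERST# asserted while the controller and
 * PHY are initialized, release it once the hardware is ready, then
 * program the SID mapping on platforms that need it.
 */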
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
		return ret;
	}

	return 0;
}
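
/*
 * Scale the interconnect vote to the negotiated link: roughly 250 MB/s
 * per lane at 2.5 GT/s, 500 MB/s at 5 GT/s and 985 MB/s at 8 GT/s
 * (128b/130b encoding), multiplied by the link width.
 */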
static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 offset, status, bw;
	int speed, width;
	int ret;

	if (!pcie->icc_mem)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	switch (speed) {
	case 1:
		bw = MBps_to_icc(250);
		break;
	case 2:
		bw = MBps_to_icc(500);
		break;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case 3:
		bw = MBps_to_icc(985);
		break;
	}

	ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
	}
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	const struct qcom_pcie_cfg *pcie_cfg;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = qcom_pcie_icc_init(pcie);
	if (ret)
		goto err_pm_runtime_put;

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	qcom_pcie_icc_update(pcie);

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ }
};
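
/*
 * Qualcomm root ports advertise an incorrect device class; override it
 * early so the PCI core treats them as PCI-to-PCI bridges.
 */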
"qcom,pcie-apq8064", .data = &cfg_2_1_0 }, 1814 { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, 1815 { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, 1816 { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, 1817 { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, 1818 { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, 1819 { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, 1820 { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, 1821 { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, 1822 { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, 1823 { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, 1824 { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, 1825 { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, 1826 { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, 1827 { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, 1828 { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 }, 1829 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, 1830 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, 1831 { } 1832 }; 1833 1834 static void qcom_fixup_class(struct pci_dev *dev) 1835 { 1836 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; 1837 } 1838 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); 1839 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); 1840 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); 1841 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); 1842 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); 1843 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 1844 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); 1845 1846 static struct platform_driver qcom_pcie_driver = { 1847 .probe = qcom_pcie_probe, 1848 .driver = { 1849 .name = "qcom-pcie", 1850 .suppress_bind_attrs = true, 1851 .of_match_table = qcom_pcie_match, 1852 }, 1853 }; 1854 builtin_platform_driver(qcom_pcie_driver); 1855