// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000
#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000
/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5

#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
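/*
 * Each Qualcomm PCIe IP revision is wired up with a different set of
 * clocks, resets and supplies. Only one of the per-revision resource
 * structs below is in use for a given device, so they are overlaid in
 * a union to keep struct qcom_pcie small.
 */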
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return 0;
}
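/*
 * On the 2.1.0/1.0.0 parts, link training is kicked off through the
 * LT_ENABLE bit in the ELBI system control register; later revisions
 * do the same through the LTSSM enable bit in PARF instead (see
 * qcom_pcie_2_3_2_ltssm_enable() below).
 */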
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
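/*
 * Bring-up order for the 2.1.0 parts: hold every reset asserted so the
 * controller is in a known state, enable the supplies, release the
 * resets one by one, enable the bus clocks, and only then program the
 * PHY/refclk related PARF registers.
 */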
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as uboot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
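/*
 * The "pipe" clock is handled in the post_init/post_deinit hooks rather
 * than in init/deinit above: it is sourced from the PHY, so it can
 * presumably only run once phy_power_on() has completed (see the
 * ordering in qcom_pcie_host_init()).
 */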
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
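/*
 * The 2.4.0 bring-up first asserts every reset, including the "sticky"
 * variants (which presumably retain their state across the companion
 * resets), waits for the asserts to settle, then deasserts them in
 * dependency order with further delays in between. There is no status
 * register to poll for completion, hence the fixed ~10 ms sleeps.
 */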
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
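/*
 * In addition to the usual reset/clock bring-up, the 2.3.3 init
 * programs several normally read-only DBI registers: writing
 * DBI_RO_WR_EN to MISC_CONTROL_1 unlocks them, after which the slot
 * capability, the ASPM support bits in the link capability and the
 * completion timeout disable bit can be overwritten.
 */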
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	int i, ret;
	u32 val;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";
	res->clks[4].id = "slave_q2a";
	res->clks[5].id = "tbu";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		goto err_disable_clocks;
	}

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
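/*
 * The PARF BDF_TO_SID table translates an incoming BDF into an SMMU
 * stream ID. It is a 256-entry hash table: each 32-bit entry holds
 * BDF[31:16], a SID offset [15:8] and a NEXT index [7:0] used to chain
 * to the following entry on a collision. Entries are indexed by the
 * CRC8 of the big-endian BDF, with collisions resolved by linear
 * probing while patching the previous entry's NEXT field.
 */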
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		u16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for the next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
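/*
 * Host bring-up ordering: hold PERST# asserted while the per-revision
 * init runs and the PHY is powered on, then release PERST# and, where
 * required, program the BDF-to-SID mapping before the DesignWare core
 * starts issuing configuration accesses.
 */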
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->ops->config_sid) {
		ret = pcie->ops->config_sid(pcie);
		if (ret)
			goto err;
	}

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		pm_runtime_disable(&pdev->dev);
		goto err_pm_runtime_put;
	}

	return 0;

err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &ops_1_9_0 },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);