// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

#define PCIE20_PARF_PM_CTRL			0x20
#define REQ_NOT_ENTR_L1				BIT(5)

#define PCIE20_PARF_PHY_CTRL			0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000
#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
						250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
						1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_HPC | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

#define PERST_DELAY_US				1000

/* PARF registers */
#define PCIE20_PARF_PCS_DEEMPH			0x34
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

#define PCIE20_PARF_PCS_SWING			0x38
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

#define PCIE20_PARF_CONFIG_BITS			0x50
#define PHY_RX0_EQ(x)				((x) << 24)

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0

#define DEVICE_TYPE_RC				0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5

#define QCOM_PCIE_CRC8_POLYNOMIAL	(BIT(2) | BIT(1) | BIT(0))
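
/*
 * Register space overview: PARF holds the Qualcomm wrapper registers around
 * the DesignWare core (PHY control, LTSSM enable on newer IP, BDF-to-SID
 * mapping, ...); ELBI is only used by the 2.1.0/1.0.0 IP here, to set the
 * link-training enable bit; DBI is the DesignWare core's own register space
 * (pci->dbi_base).
 *
 * PCIE_CAP_SLOT_VAL advertises a fully featured slot. With SPLV = 250 and
 * SPLS = 1 (a 0.1x scale per the PCIe SLTCAP encoding), the advertised slot
 * power limit works out to 25 W.
 */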

struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

/* 6 clocks typically, 7 for sm8250, up to 9 with the sm8450 aggre clocks */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[9];
	int num_clks;
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[5];
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	unsigned int has_tbu_clk:1;
	unsigned int has_ddrss_sf_tbu_clk:1;
	unsigned int has_aggre0_clk:1;
	unsigned int has_aggre1_clk:1;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_cfg *cfg;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
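
/*
 * The per-compatible match data supplies a qcom_pcie_cfg, which binds one
 * qcom_pcie_ops table plus optional clock flags; only the member of the
 * resources union matching the bound ops is ever used.
 */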

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}
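
/*
 * How the LTSSM is kicked depends on the IP revision: 2.1.0 and 1.0.0 set
 * LT_ENABLE in the ELBI space (below), while all later revisions set BIT(8)
 * in PARF_LTSSM (see qcom_pcie_2_3_2_ltssm_enable()).
 */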

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* reset the PCIe interface, as the bootloader can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	return 0;

err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the max TLP size to 2K, instead of the default 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;
}
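
/*
 * The PCS de-emphasis/swing and RX equalization numbers written above for
 * ipq8064 are silicon-specific PHY tuning values; they appear to come from
 * vendor bring-up code rather than from anything in the PCIe spec.
 */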

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
}
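
/*
 * Writing 0 to PCIE20_PARF_DBI_BASE_ADDR places the DBI window at offset 0
 * of the controller's slave address space. The AXI_MSTR_WR_ADDR_HALT tweak
 * matters only when MSIs are in use, hence the IS_ENABLED(CONFIG_PCI_MSI)
 * guard; BIT(31) is understood to be the enable bit of that halt window.
 */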

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
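
/*
 * The four PARF writes above are the bring-up pattern shared by the later
 * IP revisions too: clear the PHY test power-down bit (bit 0 of PHY_CTRL),
 * map the DBI window at offset 0, disable the MAC PHY_POWERDOWN mux
 * (bit 29 of SYS_CTRL) and set BYPASS in MHI_CLOCK_RESET_CTRL.
 */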

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}
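
/*
 * The 2.4.0 init sequence below asserts every reset, deasserts them in a
 * fixed order and sprinkles ~10 ms settle delays in between. Much like the
 * 2.9.0 delays further down, these look like working values inherited from
 * downstream kernels rather than documented minimums.
 */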

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}
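
/*
 * The 2.4.0 PARF bring-up below is identical to
 * qcom_pcie_post_init_2_3_2(); it is kept separate per IP revision.
 */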

static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}
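
/*
 * post_init below rewrites normally read-only config space after setting
 * DBI_RO_WR_EN: it advertises the fixed slot capabilities, clears the ASPM
 * support bits in LNKCAP so ASPM is never advertised, and sets the
 * completion-timeout-disable bit in DEVCTL2.
 */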

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	return 0;
}
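
/*
 * get_resources below assembles the clock list from the cfg flags: "tbu"
 * for sdm845/sm8250/sc7280/sc8180x, "ddrss_sf_tbu" for sm8250 and the
 * sm8450 ports, "aggre0"/"aggre1" for the sm8450 ports.
 */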

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int idx;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";
	if (pcie->cfg->has_tbu_clk)
		res->clks[idx++].id = "tbu";
	if (pcie->cfg->has_ddrss_sf_tbu_clk)
		res->clks[idx++].id = "ddrss_sf_tbu";
	if (pcie->cfg->has_aggre0_clk)
		res->clks[idx++].id = "aggre0";
	if (pcie->cfg->has_aggre1_clk)
		res->clks[idx++].id = "aggre1";

	res->num_clks = idx;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
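
/*
 * REQ_NOT_ENTR_L1, when set, keeps the controller from requesting L1 entry;
 * clearing it above is what actually permits L1 and the L1 substates.
 */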

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "axi_bridge";
	res->clks[4].id = "rchng";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from the downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
	       pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
	       GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
	       pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PCIE20_PARF_SYS_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}
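
/*
 * qcom_pcie_link_up() below reports the link as up once the Data Link
 * Layer Link Active bit is set in the standard Link Status register.
 */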

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for the next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
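
/*
 * Each BDF_TO_SID entry above packs BDF[31:16] | SID offset[15:8] |
 * NEXT[7:0], indexed by crc8(bdf). Collisions are resolved by chaining:
 * with hypothetical hash values, if BDFs 0x0100 and 0x0200 both hash to
 * 0x3c, the first lands at index 0x3c with NEXT pointed at 0x3d, and the
 * second is written at 0x3d.
 */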

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err;
	}

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->cfg->ops->post_deinit)
		pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
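
/*
 * Per-SoC configs: each binds one of the ops tables above and flags any
 * extra clocks the SoC needs. Several SoCs share ops_1_9_0 even where the
 * Qcom IP revision differs slightly (see the sm8150 note below).
 */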

static const struct qcom_pcie_cfg apq8084_cfg = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg ipq8064_cfg = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg msm8996_cfg = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg ipq8074_cfg = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg ipq4019_cfg = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg sdm845_cfg = {
	.ops = &ops_2_7_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg sm8150_cfg = {
	/*
	 * sm8150 has Qcom IP rev 1.5.0. However, the 1.5.0 ops are the same
	 * as 1.9.0, so reuse them.
	 */
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg sm8250_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
	.has_ddrss_sf_tbu_clk = true,
};

static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
	.ops = &ops_1_9_0,
	.has_ddrss_sf_tbu_clk = true,
	.has_aggre0_clk = true,
	.has_aggre1_clk = true,
};

static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
	.ops = &ops_1_9_0,
	.has_ddrss_sf_tbu_clk = true,
	.has_aggre1_clk = true,
};

static const struct qcom_pcie_cfg sc7280_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg sc8180x_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg ipq6018_cfg = {
	.ops = &ops_2_9_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	const struct qcom_pcie_cfg *pcie_cfg;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
	{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
	{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
	{ .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
	{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
	{ .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
	{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
	{ .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
	{ }
};
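
/*
 * These root ports report a non-bridge class code in hardware; force the
 * normal PCI-to-PCI bridge class so the PCI core enumerates them as such.
 */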
.compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg }, 1770 { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg }, 1771 { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg }, 1772 { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg }, 1773 { .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg }, 1774 { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg }, 1775 { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg }, 1776 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg }, 1777 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg }, 1778 { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg }, 1779 { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg }, 1780 { } 1781 }; 1782 1783 static void qcom_fixup_class(struct pci_dev *dev) 1784 { 1785 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; 1786 } 1787 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); 1788 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); 1789 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); 1790 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); 1791 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); 1792 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 1793 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); 1794 1795 static struct platform_driver qcom_pcie_driver = { 1796 .probe = qcom_pcie_probe, 1797 .driver = { 1798 .name = "qcom-pcie", 1799 .suppress_bind_attrs = true, 1800 .of_match_table = qcom_pcie_match, 1801 }, 1802 }; 1803 builtin_platform_driver(qcom_pcie_driver); 1804