1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Qualcomm PCIe root complex driver 4 * 5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 6 * Copyright 2015 Linaro Limited. 7 * 8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com> 9 */ 10 11 #include <linux/clk.h> 12 #include <linux/crc8.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/iopoll.h> 18 #include <linux/kernel.h> 19 #include <linux/init.h> 20 #include <linux/of_device.h> 21 #include <linux/of_gpio.h> 22 #include <linux/pci.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/platform_device.h> 25 #include <linux/phy/phy.h> 26 #include <linux/regulator/consumer.h> 27 #include <linux/reset.h> 28 #include <linux/slab.h> 29 #include <linux/types.h> 30 31 #include "../../pci.h" 32 #include "pcie-designware.h" 33 34 #define PCIE20_PARF_SYS_CTRL 0x00 35 #define MST_WAKEUP_EN BIT(13) 36 #define SLV_WAKEUP_EN BIT(12) 37 #define MSTR_ACLK_CGC_DIS BIT(10) 38 #define SLV_ACLK_CGC_DIS BIT(9) 39 #define CORE_CLK_CGC_DIS BIT(6) 40 #define AUX_PWR_DET BIT(4) 41 #define L23_CLK_RMV_DIS BIT(2) 42 #define L1_CLK_RMV_DIS BIT(1) 43 44 #define PCIE20_PARF_PM_CTRL 0x20 45 #define REQ_NOT_ENTR_L1 BIT(5) 46 47 #define PCIE20_PARF_PHY_CTRL 0x40 48 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) 49 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) 50 51 #define PCIE20_PARF_PHY_REFCLK 0x4C 52 #define PHY_REFCLK_SSP_EN BIT(16) 53 #define PHY_REFCLK_USE_PAD BIT(12) 54 55 #define PCIE20_PARF_DBI_BASE_ADDR 0x168 56 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C 57 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 58 #define AHB_CLK_EN BIT(0) 59 #define MSTR_AXI_CLK_EN BIT(1) 60 #define BYPASS BIT(4) 61 62 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 63 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 64 #define PCIE20_PARF_LTSSM 0x1B0 65 #define PCIE20_PARF_SID_OFFSET 0x234 66 #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C 67 #define PCIE20_PARF_DEVICE_TYPE 0x1000 68 #define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000 69 70 #define PCIE20_ELBI_SYS_CTRL 0x04 71 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) 72 73 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 74 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 75 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 76 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c 77 #define CFG_BRIDGE_SB_INIT BIT(0) 78 79 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \ 80 250) 81 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \ 82 1) 83 #define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \ 84 PCI_EXP_SLTCAP_PCP | \ 85 PCI_EXP_SLTCAP_MRLSP | \ 86 PCI_EXP_SLTCAP_AIP | \ 87 PCI_EXP_SLTCAP_PIP | \ 88 PCI_EXP_SLTCAP_HPS | \ 89 PCI_EXP_SLTCAP_HPC | \ 90 PCI_EXP_SLTCAP_EIP | \ 91 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ 92 PCIE_CAP_SLOT_POWER_LIMIT_SCALE) 93 94 #define PCIE20_PARF_Q2A_FLUSH 0x1AC 95 96 #define PCIE20_MISC_CONTROL_1_REG 0x8BC 97 #define DBI_RO_WR_EN 1 98 99 #define PERST_DELAY_US 1000 100 /* PARF registers */ 101 #define PCIE20_PARF_PCS_DEEMPH 0x34 102 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) 103 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) 104 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) 105 106 #define PCIE20_PARF_PCS_SWING 0x38 107 #define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) 108 #define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) 109 110 #define PCIE20_PARF_CONFIG_BITS 0x50 111 #define PHY_RX0_EQ(x) ((x) << 24) 112 113 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 114 #define SLV_ADDR_SPACE_SZ 0x10000000 115 116 #define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0 117 118 #define DEVICE_TYPE_RC 0x4 119 120 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 121 #define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 122 123 #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0)) 124 125 struct qcom_pcie_resources_2_1_0 { 126 struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; 127 struct reset_control *pci_reset; 128 struct reset_control *axi_reset; 129 struct reset_control *ahb_reset; 130 struct reset_control *por_reset; 131 struct reset_control *phy_reset; 132 struct reset_control *ext_reset; 133 struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; 134 }; 135 136 struct qcom_pcie_resources_1_0_0 { 137 struct clk *iface; 138 struct clk *aux; 139 struct clk *master_bus; 140 struct clk *slave_bus; 141 struct reset_control *core; 142 struct regulator *vdda; 143 }; 144 145 #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 146 struct qcom_pcie_resources_2_3_2 { 147 struct clk *aux_clk; 148 struct clk *master_clk; 149 struct clk *slave_clk; 150 struct clk *cfg_clk; 151 struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; 152 }; 153 154 #define QCOM_PCIE_2_4_0_MAX_CLOCKS 4 155 struct qcom_pcie_resources_2_4_0 { 156 struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; 157 int num_clks; 158 struct reset_control *axi_m_reset; 159 struct reset_control *axi_s_reset; 160 struct reset_control *pipe_reset; 161 struct reset_control *axi_m_vmid_reset; 162 struct reset_control *axi_s_xpu_reset; 163 struct reset_control *parf_reset; 164 struct reset_control *phy_reset; 165 struct reset_control *axi_m_sticky_reset; 166 struct reset_control *pipe_sticky_reset; 167 struct reset_control *pwr_reset; 168 struct reset_control *ahb_reset; 169 struct reset_control *phy_ahb_reset; 170 }; 171 172 struct qcom_pcie_resources_2_3_3 { 173 struct clk *iface; 174 struct clk *axi_m_clk; 175 struct clk *axi_s_clk; 176 struct clk *ahb_clk; 177 struct clk *aux_clk; 178 struct reset_control *rst[7]; 179 }; 180 181 /* 6 clocks typically, 7 for sm8250 */ 182 struct qcom_pcie_resources_2_7_0 { 183 struct clk_bulk_data clks[12]; 184 int num_clks; 185 struct regulator_bulk_data supplies[2]; 186 struct reset_control *pci_reset; 187 }; 188 189 struct qcom_pcie_resources_2_9_0 { 190 struct clk_bulk_data clks[5]; 191 struct reset_control *rst; 192 }; 193 194 union qcom_pcie_resources { 195 struct qcom_pcie_resources_1_0_0 v1_0_0; 196 struct qcom_pcie_resources_2_1_0 v2_1_0; 197 struct qcom_pcie_resources_2_3_2 v2_3_2; 198 struct qcom_pcie_resources_2_3_3 v2_3_3; 199 struct qcom_pcie_resources_2_4_0 v2_4_0; 200 struct qcom_pcie_resources_2_7_0 v2_7_0; 201 struct qcom_pcie_resources_2_9_0 v2_9_0; 202 }; 203 204 struct qcom_pcie; 205 206 struct qcom_pcie_ops { 207 int (*get_resources)(struct qcom_pcie *pcie); 208 int (*init)(struct qcom_pcie *pcie); 209 int (*post_init)(struct qcom_pcie *pcie); 210 void (*deinit)(struct qcom_pcie *pcie); 211 void (*ltssm_enable)(struct qcom_pcie *pcie); 212 int (*config_sid)(struct qcom_pcie *pcie); 213 }; 214 215 struct qcom_pcie_cfg { 216 const struct qcom_pcie_ops *ops; 217 }; 218 219 struct qcom_pcie { 220 struct dw_pcie *pci; 221 void __iomem *parf; /* DT parf */ 222 void __iomem *elbi; /* DT elbi */ 223 union qcom_pcie_resources res; 224 struct phy *phy; 225 struct gpio_desc *reset; 226 const struct qcom_pcie_cfg *cfg; 227 }; 228 229 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) 230 231 static void qcom_ep_reset_assert(struct qcom_pcie *pcie) 232 { 233 gpiod_set_value_cansleep(pcie->reset, 1); 234 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 235 } 236 237 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) 238 { 239 /* Ensure that PERST has been asserted for at least 100 ms */ 240 msleep(100); 241 gpiod_set_value_cansleep(pcie->reset, 0); 242 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 243 } 244 245 static int qcom_pcie_start_link(struct dw_pcie *pci) 246 { 247 struct qcom_pcie *pcie = to_qcom_pcie(pci); 248 249 /* Enable Link Training state machine */ 250 if (pcie->cfg->ops->ltssm_enable) 251 pcie->cfg->ops->ltssm_enable(pcie); 252 253 return 0; 254 } 255 256 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) 257 { 258 u32 val; 259 260 /* enable link training */ 261 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); 262 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; 263 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); 264 } 265 266 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) 267 { 268 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 269 struct dw_pcie *pci = pcie->pci; 270 struct device *dev = pci->dev; 271 int ret; 272 273 res->supplies[0].supply = "vdda"; 274 res->supplies[1].supply = "vdda_phy"; 275 res->supplies[2].supply = "vdda_refclk"; 276 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 277 res->supplies); 278 if (ret) 279 return ret; 280 281 res->clks[0].id = "iface"; 282 res->clks[1].id = "core"; 283 res->clks[2].id = "phy"; 284 res->clks[3].id = "aux"; 285 res->clks[4].id = "ref"; 286 287 /* iface, core, phy are required */ 288 ret = devm_clk_bulk_get(dev, 3, res->clks); 289 if (ret < 0) 290 return ret; 291 292 /* aux, ref are optional */ 293 ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); 294 if (ret < 0) 295 return ret; 296 297 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); 298 if (IS_ERR(res->pci_reset)) 299 return PTR_ERR(res->pci_reset); 300 301 res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); 302 if (IS_ERR(res->axi_reset)) 303 return PTR_ERR(res->axi_reset); 304 305 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); 306 if (IS_ERR(res->ahb_reset)) 307 return PTR_ERR(res->ahb_reset); 308 309 res->por_reset = devm_reset_control_get_exclusive(dev, "por"); 310 if (IS_ERR(res->por_reset)) 311 return PTR_ERR(res->por_reset); 312 313 res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); 314 if (IS_ERR(res->ext_reset)) 315 return PTR_ERR(res->ext_reset); 316 317 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); 318 return PTR_ERR_OR_ZERO(res->phy_reset); 319 } 320 321 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) 322 { 323 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 324 325 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); 326 reset_control_assert(res->pci_reset); 327 reset_control_assert(res->axi_reset); 328 reset_control_assert(res->ahb_reset); 329 reset_control_assert(res->por_reset); 330 reset_control_assert(res->ext_reset); 331 reset_control_assert(res->phy_reset); 332 333 writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); 334 335 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 336 } 337 338 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) 339 { 340 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 341 struct dw_pcie *pci = pcie->pci; 342 struct device *dev = pci->dev; 343 int ret; 344 345 /* reset the PCIe interface as uboot can leave it undefined state */ 346 reset_control_assert(res->pci_reset); 347 reset_control_assert(res->axi_reset); 348 reset_control_assert(res->ahb_reset); 349 reset_control_assert(res->por_reset); 350 reset_control_assert(res->ext_reset); 351 reset_control_assert(res->phy_reset); 352 353 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 354 if (ret < 0) { 355 dev_err(dev, "cannot enable regulators\n"); 356 return ret; 357 } 358 359 ret = reset_control_deassert(res->ahb_reset); 360 if (ret) { 361 dev_err(dev, "cannot deassert ahb reset\n"); 362 goto err_deassert_ahb; 363 } 364 365 ret = reset_control_deassert(res->ext_reset); 366 if (ret) { 367 dev_err(dev, "cannot deassert ext reset\n"); 368 goto err_deassert_ext; 369 } 370 371 ret = reset_control_deassert(res->phy_reset); 372 if (ret) { 373 dev_err(dev, "cannot deassert phy reset\n"); 374 goto err_deassert_phy; 375 } 376 377 ret = reset_control_deassert(res->pci_reset); 378 if (ret) { 379 dev_err(dev, "cannot deassert pci reset\n"); 380 goto err_deassert_pci; 381 } 382 383 ret = reset_control_deassert(res->por_reset); 384 if (ret) { 385 dev_err(dev, "cannot deassert por reset\n"); 386 goto err_deassert_por; 387 } 388 389 ret = reset_control_deassert(res->axi_reset); 390 if (ret) { 391 dev_err(dev, "cannot deassert axi reset\n"); 392 goto err_deassert_axi; 393 } 394 395 return 0; 396 397 err_deassert_axi: 398 reset_control_assert(res->por_reset); 399 err_deassert_por: 400 reset_control_assert(res->pci_reset); 401 err_deassert_pci: 402 reset_control_assert(res->phy_reset); 403 err_deassert_phy: 404 reset_control_assert(res->ext_reset); 405 err_deassert_ext: 406 reset_control_assert(res->ahb_reset); 407 err_deassert_ahb: 408 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 409 410 return ret; 411 } 412 413 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) 414 { 415 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 416 struct dw_pcie *pci = pcie->pci; 417 struct device *dev = pci->dev; 418 struct device_node *node = dev->of_node; 419 u32 val; 420 int ret; 421 422 /* enable PCIe clocks and resets */ 423 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 424 val &= ~BIT(0); 425 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 426 427 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); 428 if (ret) 429 return ret; 430 431 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || 432 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { 433 writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | 434 PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | 435 PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), 436 pcie->parf + PCIE20_PARF_PCS_DEEMPH); 437 writel(PCS_SWING_TX_SWING_FULL(120) | 438 PCS_SWING_TX_SWING_LOW(120), 439 pcie->parf + PCIE20_PARF_PCS_SWING); 440 writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); 441 } 442 443 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { 444 /* set TX termination offset */ 445 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 446 val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; 447 val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); 448 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 449 } 450 451 /* enable external reference clock */ 452 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); 453 /* USE_PAD is required only for ipq806x */ 454 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) 455 val &= ~PHY_REFCLK_USE_PAD; 456 val |= PHY_REFCLK_SSP_EN; 457 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); 458 459 /* wait for clock acquisition */ 460 usleep_range(1000, 1500); 461 462 /* Set the Max TLP size to 2K, instead of using default of 4K */ 463 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, 464 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); 465 writel(CFG_BRIDGE_SB_INIT, 466 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); 467 468 return 0; 469 } 470 471 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) 472 { 473 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 474 struct dw_pcie *pci = pcie->pci; 475 struct device *dev = pci->dev; 476 477 res->vdda = devm_regulator_get(dev, "vdda"); 478 if (IS_ERR(res->vdda)) 479 return PTR_ERR(res->vdda); 480 481 res->iface = devm_clk_get(dev, "iface"); 482 if (IS_ERR(res->iface)) 483 return PTR_ERR(res->iface); 484 485 res->aux = devm_clk_get(dev, "aux"); 486 if (IS_ERR(res->aux)) 487 return PTR_ERR(res->aux); 488 489 res->master_bus = devm_clk_get(dev, "master_bus"); 490 if (IS_ERR(res->master_bus)) 491 return PTR_ERR(res->master_bus); 492 493 res->slave_bus = devm_clk_get(dev, "slave_bus"); 494 if (IS_ERR(res->slave_bus)) 495 return PTR_ERR(res->slave_bus); 496 497 res->core = devm_reset_control_get_exclusive(dev, "core"); 498 return PTR_ERR_OR_ZERO(res->core); 499 } 500 501 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) 502 { 503 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 504 505 reset_control_assert(res->core); 506 clk_disable_unprepare(res->slave_bus); 507 clk_disable_unprepare(res->master_bus); 508 clk_disable_unprepare(res->iface); 509 clk_disable_unprepare(res->aux); 510 regulator_disable(res->vdda); 511 } 512 513 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) 514 { 515 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 516 struct dw_pcie *pci = pcie->pci; 517 struct device *dev = pci->dev; 518 int ret; 519 520 ret = reset_control_deassert(res->core); 521 if (ret) { 522 dev_err(dev, "cannot deassert core reset\n"); 523 return ret; 524 } 525 526 ret = clk_prepare_enable(res->aux); 527 if (ret) { 528 dev_err(dev, "cannot prepare/enable aux clock\n"); 529 goto err_res; 530 } 531 532 ret = clk_prepare_enable(res->iface); 533 if (ret) { 534 dev_err(dev, "cannot prepare/enable iface clock\n"); 535 goto err_aux; 536 } 537 538 ret = clk_prepare_enable(res->master_bus); 539 if (ret) { 540 dev_err(dev, "cannot prepare/enable master_bus clock\n"); 541 goto err_iface; 542 } 543 544 ret = clk_prepare_enable(res->slave_bus); 545 if (ret) { 546 dev_err(dev, "cannot prepare/enable slave_bus clock\n"); 547 goto err_master; 548 } 549 550 ret = regulator_enable(res->vdda); 551 if (ret) { 552 dev_err(dev, "cannot enable vdda regulator\n"); 553 goto err_slave; 554 } 555 556 return 0; 557 err_slave: 558 clk_disable_unprepare(res->slave_bus); 559 err_master: 560 clk_disable_unprepare(res->master_bus); 561 err_iface: 562 clk_disable_unprepare(res->iface); 563 err_aux: 564 clk_disable_unprepare(res->aux); 565 err_res: 566 reset_control_assert(res->core); 567 568 return ret; 569 } 570 571 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) 572 { 573 /* change DBI base address */ 574 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 575 576 if (IS_ENABLED(CONFIG_PCI_MSI)) { 577 u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 578 579 val |= BIT(31); 580 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 581 } 582 583 return 0; 584 } 585 586 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) 587 { 588 u32 val; 589 590 /* enable link training */ 591 val = readl(pcie->parf + PCIE20_PARF_LTSSM); 592 val |= BIT(8); 593 writel(val, pcie->parf + PCIE20_PARF_LTSSM); 594 } 595 596 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) 597 { 598 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 599 struct dw_pcie *pci = pcie->pci; 600 struct device *dev = pci->dev; 601 int ret; 602 603 res->supplies[0].supply = "vdda"; 604 res->supplies[1].supply = "vddpe-3v3"; 605 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 606 res->supplies); 607 if (ret) 608 return ret; 609 610 res->aux_clk = devm_clk_get(dev, "aux"); 611 if (IS_ERR(res->aux_clk)) 612 return PTR_ERR(res->aux_clk); 613 614 res->cfg_clk = devm_clk_get(dev, "cfg"); 615 if (IS_ERR(res->cfg_clk)) 616 return PTR_ERR(res->cfg_clk); 617 618 res->master_clk = devm_clk_get(dev, "bus_master"); 619 if (IS_ERR(res->master_clk)) 620 return PTR_ERR(res->master_clk); 621 622 res->slave_clk = devm_clk_get(dev, "bus_slave"); 623 if (IS_ERR(res->slave_clk)) 624 return PTR_ERR(res->slave_clk); 625 626 return 0; 627 } 628 629 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) 630 { 631 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 632 633 clk_disable_unprepare(res->slave_clk); 634 clk_disable_unprepare(res->master_clk); 635 clk_disable_unprepare(res->cfg_clk); 636 clk_disable_unprepare(res->aux_clk); 637 638 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 639 } 640 641 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) 642 { 643 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 644 struct dw_pcie *pci = pcie->pci; 645 struct device *dev = pci->dev; 646 int ret; 647 648 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 649 if (ret < 0) { 650 dev_err(dev, "cannot enable regulators\n"); 651 return ret; 652 } 653 654 ret = clk_prepare_enable(res->aux_clk); 655 if (ret) { 656 dev_err(dev, "cannot prepare/enable aux clock\n"); 657 goto err_aux_clk; 658 } 659 660 ret = clk_prepare_enable(res->cfg_clk); 661 if (ret) { 662 dev_err(dev, "cannot prepare/enable cfg clock\n"); 663 goto err_cfg_clk; 664 } 665 666 ret = clk_prepare_enable(res->master_clk); 667 if (ret) { 668 dev_err(dev, "cannot prepare/enable master clock\n"); 669 goto err_master_clk; 670 } 671 672 ret = clk_prepare_enable(res->slave_clk); 673 if (ret) { 674 dev_err(dev, "cannot prepare/enable slave clock\n"); 675 goto err_slave_clk; 676 } 677 678 return 0; 679 680 err_slave_clk: 681 clk_disable_unprepare(res->master_clk); 682 err_master_clk: 683 clk_disable_unprepare(res->cfg_clk); 684 err_cfg_clk: 685 clk_disable_unprepare(res->aux_clk); 686 687 err_aux_clk: 688 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 689 690 return ret; 691 } 692 693 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) 694 { 695 u32 val; 696 697 /* enable PCIe clocks and resets */ 698 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 699 val &= ~BIT(0); 700 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 701 702 /* change DBI base address */ 703 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 704 705 /* MAC PHY_POWERDOWN MUX DISABLE */ 706 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 707 val &= ~BIT(29); 708 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 709 710 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 711 val |= BIT(4); 712 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 713 714 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 715 val |= BIT(31); 716 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 717 718 return 0; 719 } 720 721 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) 722 { 723 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 724 struct dw_pcie *pci = pcie->pci; 725 struct device *dev = pci->dev; 726 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); 727 int ret; 728 729 res->clks[0].id = "aux"; 730 res->clks[1].id = "master_bus"; 731 res->clks[2].id = "slave_bus"; 732 res->clks[3].id = "iface"; 733 734 /* qcom,pcie-ipq4019 is defined without "iface" */ 735 res->num_clks = is_ipq ? 3 : 4; 736 737 ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); 738 if (ret < 0) 739 return ret; 740 741 res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); 742 if (IS_ERR(res->axi_m_reset)) 743 return PTR_ERR(res->axi_m_reset); 744 745 res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); 746 if (IS_ERR(res->axi_s_reset)) 747 return PTR_ERR(res->axi_s_reset); 748 749 if (is_ipq) { 750 /* 751 * These resources relates to the PHY or are secure clocks, but 752 * are controlled here for IPQ4019 753 */ 754 res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); 755 if (IS_ERR(res->pipe_reset)) 756 return PTR_ERR(res->pipe_reset); 757 758 res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, 759 "axi_m_vmid"); 760 if (IS_ERR(res->axi_m_vmid_reset)) 761 return PTR_ERR(res->axi_m_vmid_reset); 762 763 res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, 764 "axi_s_xpu"); 765 if (IS_ERR(res->axi_s_xpu_reset)) 766 return PTR_ERR(res->axi_s_xpu_reset); 767 768 res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); 769 if (IS_ERR(res->parf_reset)) 770 return PTR_ERR(res->parf_reset); 771 772 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); 773 if (IS_ERR(res->phy_reset)) 774 return PTR_ERR(res->phy_reset); 775 } 776 777 res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, 778 "axi_m_sticky"); 779 if (IS_ERR(res->axi_m_sticky_reset)) 780 return PTR_ERR(res->axi_m_sticky_reset); 781 782 res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, 783 "pipe_sticky"); 784 if (IS_ERR(res->pipe_sticky_reset)) 785 return PTR_ERR(res->pipe_sticky_reset); 786 787 res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); 788 if (IS_ERR(res->pwr_reset)) 789 return PTR_ERR(res->pwr_reset); 790 791 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); 792 if (IS_ERR(res->ahb_reset)) 793 return PTR_ERR(res->ahb_reset); 794 795 if (is_ipq) { 796 res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); 797 if (IS_ERR(res->phy_ahb_reset)) 798 return PTR_ERR(res->phy_ahb_reset); 799 } 800 801 return 0; 802 } 803 804 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) 805 { 806 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 807 808 reset_control_assert(res->axi_m_reset); 809 reset_control_assert(res->axi_s_reset); 810 reset_control_assert(res->pipe_reset); 811 reset_control_assert(res->pipe_sticky_reset); 812 reset_control_assert(res->phy_reset); 813 reset_control_assert(res->phy_ahb_reset); 814 reset_control_assert(res->axi_m_sticky_reset); 815 reset_control_assert(res->pwr_reset); 816 reset_control_assert(res->ahb_reset); 817 clk_bulk_disable_unprepare(res->num_clks, res->clks); 818 } 819 820 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) 821 { 822 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 823 struct dw_pcie *pci = pcie->pci; 824 struct device *dev = pci->dev; 825 int ret; 826 827 ret = reset_control_assert(res->axi_m_reset); 828 if (ret) { 829 dev_err(dev, "cannot assert axi master reset\n"); 830 return ret; 831 } 832 833 ret = reset_control_assert(res->axi_s_reset); 834 if (ret) { 835 dev_err(dev, "cannot assert axi slave reset\n"); 836 return ret; 837 } 838 839 usleep_range(10000, 12000); 840 841 ret = reset_control_assert(res->pipe_reset); 842 if (ret) { 843 dev_err(dev, "cannot assert pipe reset\n"); 844 return ret; 845 } 846 847 ret = reset_control_assert(res->pipe_sticky_reset); 848 if (ret) { 849 dev_err(dev, "cannot assert pipe sticky reset\n"); 850 return ret; 851 } 852 853 ret = reset_control_assert(res->phy_reset); 854 if (ret) { 855 dev_err(dev, "cannot assert phy reset\n"); 856 return ret; 857 } 858 859 ret = reset_control_assert(res->phy_ahb_reset); 860 if (ret) { 861 dev_err(dev, "cannot assert phy ahb reset\n"); 862 return ret; 863 } 864 865 usleep_range(10000, 12000); 866 867 ret = reset_control_assert(res->axi_m_sticky_reset); 868 if (ret) { 869 dev_err(dev, "cannot assert axi master sticky reset\n"); 870 return ret; 871 } 872 873 ret = reset_control_assert(res->pwr_reset); 874 if (ret) { 875 dev_err(dev, "cannot assert power reset\n"); 876 return ret; 877 } 878 879 ret = reset_control_assert(res->ahb_reset); 880 if (ret) { 881 dev_err(dev, "cannot assert ahb reset\n"); 882 return ret; 883 } 884 885 usleep_range(10000, 12000); 886 887 ret = reset_control_deassert(res->phy_ahb_reset); 888 if (ret) { 889 dev_err(dev, "cannot deassert phy ahb reset\n"); 890 return ret; 891 } 892 893 ret = reset_control_deassert(res->phy_reset); 894 if (ret) { 895 dev_err(dev, "cannot deassert phy reset\n"); 896 goto err_rst_phy; 897 } 898 899 ret = reset_control_deassert(res->pipe_reset); 900 if (ret) { 901 dev_err(dev, "cannot deassert pipe reset\n"); 902 goto err_rst_pipe; 903 } 904 905 ret = reset_control_deassert(res->pipe_sticky_reset); 906 if (ret) { 907 dev_err(dev, "cannot deassert pipe sticky reset\n"); 908 goto err_rst_pipe_sticky; 909 } 910 911 usleep_range(10000, 12000); 912 913 ret = reset_control_deassert(res->axi_m_reset); 914 if (ret) { 915 dev_err(dev, "cannot deassert axi master reset\n"); 916 goto err_rst_axi_m; 917 } 918 919 ret = reset_control_deassert(res->axi_m_sticky_reset); 920 if (ret) { 921 dev_err(dev, "cannot deassert axi master sticky reset\n"); 922 goto err_rst_axi_m_sticky; 923 } 924 925 ret = reset_control_deassert(res->axi_s_reset); 926 if (ret) { 927 dev_err(dev, "cannot deassert axi slave reset\n"); 928 goto err_rst_axi_s; 929 } 930 931 ret = reset_control_deassert(res->pwr_reset); 932 if (ret) { 933 dev_err(dev, "cannot deassert power reset\n"); 934 goto err_rst_pwr; 935 } 936 937 ret = reset_control_deassert(res->ahb_reset); 938 if (ret) { 939 dev_err(dev, "cannot deassert ahb reset\n"); 940 goto err_rst_ahb; 941 } 942 943 usleep_range(10000, 12000); 944 945 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 946 if (ret) 947 goto err_clks; 948 949 return 0; 950 951 err_clks: 952 reset_control_assert(res->ahb_reset); 953 err_rst_ahb: 954 reset_control_assert(res->pwr_reset); 955 err_rst_pwr: 956 reset_control_assert(res->axi_s_reset); 957 err_rst_axi_s: 958 reset_control_assert(res->axi_m_sticky_reset); 959 err_rst_axi_m_sticky: 960 reset_control_assert(res->axi_m_reset); 961 err_rst_axi_m: 962 reset_control_assert(res->pipe_sticky_reset); 963 err_rst_pipe_sticky: 964 reset_control_assert(res->pipe_reset); 965 err_rst_pipe: 966 reset_control_assert(res->phy_reset); 967 err_rst_phy: 968 reset_control_assert(res->phy_ahb_reset); 969 return ret; 970 } 971 972 static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie) 973 { 974 u32 val; 975 976 /* enable PCIe clocks and resets */ 977 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 978 val &= ~BIT(0); 979 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 980 981 /* change DBI base address */ 982 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 983 984 /* MAC PHY_POWERDOWN MUX DISABLE */ 985 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 986 val &= ~BIT(29); 987 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 988 989 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 990 val |= BIT(4); 991 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 992 993 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 994 val |= BIT(31); 995 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 996 997 return 0; 998 } 999 1000 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) 1001 { 1002 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1003 struct dw_pcie *pci = pcie->pci; 1004 struct device *dev = pci->dev; 1005 int i; 1006 const char *rst_names[] = { "axi_m", "axi_s", "pipe", 1007 "axi_m_sticky", "sticky", 1008 "ahb", "sleep", }; 1009 1010 res->iface = devm_clk_get(dev, "iface"); 1011 if (IS_ERR(res->iface)) 1012 return PTR_ERR(res->iface); 1013 1014 res->axi_m_clk = devm_clk_get(dev, "axi_m"); 1015 if (IS_ERR(res->axi_m_clk)) 1016 return PTR_ERR(res->axi_m_clk); 1017 1018 res->axi_s_clk = devm_clk_get(dev, "axi_s"); 1019 if (IS_ERR(res->axi_s_clk)) 1020 return PTR_ERR(res->axi_s_clk); 1021 1022 res->ahb_clk = devm_clk_get(dev, "ahb"); 1023 if (IS_ERR(res->ahb_clk)) 1024 return PTR_ERR(res->ahb_clk); 1025 1026 res->aux_clk = devm_clk_get(dev, "aux"); 1027 if (IS_ERR(res->aux_clk)) 1028 return PTR_ERR(res->aux_clk); 1029 1030 for (i = 0; i < ARRAY_SIZE(rst_names); i++) { 1031 res->rst[i] = devm_reset_control_get(dev, rst_names[i]); 1032 if (IS_ERR(res->rst[i])) 1033 return PTR_ERR(res->rst[i]); 1034 } 1035 1036 return 0; 1037 } 1038 1039 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) 1040 { 1041 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1042 1043 clk_disable_unprepare(res->iface); 1044 clk_disable_unprepare(res->axi_m_clk); 1045 clk_disable_unprepare(res->axi_s_clk); 1046 clk_disable_unprepare(res->ahb_clk); 1047 clk_disable_unprepare(res->aux_clk); 1048 } 1049 1050 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) 1051 { 1052 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1053 struct dw_pcie *pci = pcie->pci; 1054 struct device *dev = pci->dev; 1055 int i, ret; 1056 1057 for (i = 0; i < ARRAY_SIZE(res->rst); i++) { 1058 ret = reset_control_assert(res->rst[i]); 1059 if (ret) { 1060 dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); 1061 return ret; 1062 } 1063 } 1064 1065 usleep_range(2000, 2500); 1066 1067 for (i = 0; i < ARRAY_SIZE(res->rst); i++) { 1068 ret = reset_control_deassert(res->rst[i]); 1069 if (ret) { 1070 dev_err(dev, "reset #%d deassert failed (%d)\n", i, 1071 ret); 1072 return ret; 1073 } 1074 } 1075 1076 /* 1077 * Don't have a way to see if the reset has completed. 1078 * Wait for some time. 1079 */ 1080 usleep_range(2000, 2500); 1081 1082 ret = clk_prepare_enable(res->iface); 1083 if (ret) { 1084 dev_err(dev, "cannot prepare/enable core clock\n"); 1085 goto err_clk_iface; 1086 } 1087 1088 ret = clk_prepare_enable(res->axi_m_clk); 1089 if (ret) { 1090 dev_err(dev, "cannot prepare/enable core clock\n"); 1091 goto err_clk_axi_m; 1092 } 1093 1094 ret = clk_prepare_enable(res->axi_s_clk); 1095 if (ret) { 1096 dev_err(dev, "cannot prepare/enable axi slave clock\n"); 1097 goto err_clk_axi_s; 1098 } 1099 1100 ret = clk_prepare_enable(res->ahb_clk); 1101 if (ret) { 1102 dev_err(dev, "cannot prepare/enable ahb clock\n"); 1103 goto err_clk_ahb; 1104 } 1105 1106 ret = clk_prepare_enable(res->aux_clk); 1107 if (ret) { 1108 dev_err(dev, "cannot prepare/enable aux clock\n"); 1109 goto err_clk_aux; 1110 } 1111 1112 return 0; 1113 1114 err_clk_aux: 1115 clk_disable_unprepare(res->ahb_clk); 1116 err_clk_ahb: 1117 clk_disable_unprepare(res->axi_s_clk); 1118 err_clk_axi_s: 1119 clk_disable_unprepare(res->axi_m_clk); 1120 err_clk_axi_m: 1121 clk_disable_unprepare(res->iface); 1122 err_clk_iface: 1123 /* 1124 * Not checking for failure, will anyway return 1125 * the original failure in 'ret'. 1126 */ 1127 for (i = 0; i < ARRAY_SIZE(res->rst); i++) 1128 reset_control_assert(res->rst[i]); 1129 1130 return ret; 1131 } 1132 1133 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) 1134 { 1135 struct dw_pcie *pci = pcie->pci; 1136 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1137 u32 val; 1138 1139 writel(SLV_ADDR_SPACE_SZ, 1140 pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); 1141 1142 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1143 val &= ~BIT(0); 1144 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1145 1146 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1147 1148 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS 1149 | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1150 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1151 pcie->parf + PCIE20_PARF_SYS_CTRL); 1152 writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); 1153 1154 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); 1155 writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); 1156 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1157 1158 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1159 val &= ~PCI_EXP_LNKCAP_ASPMS; 1160 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1161 1162 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1163 PCI_EXP_DEVCTL2); 1164 1165 return 0; 1166 } 1167 1168 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) 1169 { 1170 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1171 struct dw_pcie *pci = pcie->pci; 1172 struct device *dev = pci->dev; 1173 unsigned int num_clks, num_opt_clks; 1174 unsigned int idx; 1175 int ret; 1176 1177 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); 1178 if (IS_ERR(res->pci_reset)) 1179 return PTR_ERR(res->pci_reset); 1180 1181 res->supplies[0].supply = "vdda"; 1182 res->supplies[1].supply = "vddpe-3v3"; 1183 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 1184 res->supplies); 1185 if (ret) 1186 return ret; 1187 1188 idx = 0; 1189 res->clks[idx++].id = "aux"; 1190 res->clks[idx++].id = "cfg"; 1191 res->clks[idx++].id = "bus_master"; 1192 res->clks[idx++].id = "bus_slave"; 1193 res->clks[idx++].id = "slave_q2a"; 1194 1195 num_clks = idx; 1196 1197 ret = devm_clk_bulk_get(dev, num_clks, res->clks); 1198 if (ret < 0) 1199 return ret; 1200 1201 res->clks[idx++].id = "tbu"; 1202 res->clks[idx++].id = "ddrss_sf_tbu"; 1203 res->clks[idx++].id = "aggre0"; 1204 res->clks[idx++].id = "aggre1"; 1205 res->clks[idx++].id = "noc_aggr_4"; 1206 res->clks[idx++].id = "noc_aggr_south_sf"; 1207 res->clks[idx++].id = "cnoc_qx"; 1208 1209 num_opt_clks = idx - num_clks; 1210 res->num_clks = idx; 1211 1212 ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks); 1213 if (ret < 0) 1214 return ret; 1215 1216 return 0; 1217 } 1218 1219 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) 1220 { 1221 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1222 struct dw_pcie *pci = pcie->pci; 1223 struct device *dev = pci->dev; 1224 u32 val; 1225 int ret; 1226 1227 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 1228 if (ret < 0) { 1229 dev_err(dev, "cannot enable regulators\n"); 1230 return ret; 1231 } 1232 1233 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 1234 if (ret < 0) 1235 goto err_disable_regulators; 1236 1237 ret = reset_control_assert(res->pci_reset); 1238 if (ret < 0) { 1239 dev_err(dev, "cannot deassert pci reset\n"); 1240 goto err_disable_clocks; 1241 } 1242 1243 usleep_range(1000, 1500); 1244 1245 ret = reset_control_deassert(res->pci_reset); 1246 if (ret < 0) { 1247 dev_err(dev, "cannot deassert pci reset\n"); 1248 goto err_disable_clocks; 1249 } 1250 1251 /* Wait for reset to complete, required on SM8450 */ 1252 usleep_range(1000, 1500); 1253 1254 /* configure PCIe to RC mode */ 1255 writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); 1256 1257 /* enable PCIe clocks and resets */ 1258 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1259 val &= ~BIT(0); 1260 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1261 1262 /* change DBI base address */ 1263 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1264 1265 /* MAC PHY_POWERDOWN MUX DISABLE */ 1266 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 1267 val &= ~BIT(29); 1268 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 1269 1270 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1271 val |= BIT(4); 1272 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1273 1274 /* Enable L1 and L1SS */ 1275 val = readl(pcie->parf + PCIE20_PARF_PM_CTRL); 1276 val &= ~REQ_NOT_ENTR_L1; 1277 writel(val, pcie->parf + PCIE20_PARF_PM_CTRL); 1278 1279 if (IS_ENABLED(CONFIG_PCI_MSI)) { 1280 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 1281 val |= BIT(31); 1282 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 1283 } 1284 1285 return 0; 1286 err_disable_clocks: 1287 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1288 err_disable_regulators: 1289 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1290 1291 return ret; 1292 } 1293 1294 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 1295 { 1296 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1297 1298 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1299 1300 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1301 } 1302 1303 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) 1304 { 1305 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1306 struct dw_pcie *pci = pcie->pci; 1307 struct device *dev = pci->dev; 1308 int ret; 1309 1310 res->clks[0].id = "iface"; 1311 res->clks[1].id = "axi_m"; 1312 res->clks[2].id = "axi_s"; 1313 res->clks[3].id = "axi_bridge"; 1314 res->clks[4].id = "rchng"; 1315 1316 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); 1317 if (ret < 0) 1318 return ret; 1319 1320 res->rst = devm_reset_control_array_get_exclusive(dev); 1321 if (IS_ERR(res->rst)) 1322 return PTR_ERR(res->rst); 1323 1324 return 0; 1325 } 1326 1327 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie) 1328 { 1329 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1330 1331 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); 1332 } 1333 1334 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) 1335 { 1336 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1337 struct device *dev = pcie->pci->dev; 1338 int ret; 1339 1340 ret = reset_control_assert(res->rst); 1341 if (ret) { 1342 dev_err(dev, "reset assert failed (%d)\n", ret); 1343 return ret; 1344 } 1345 1346 /* 1347 * Delay periods before and after reset deassert are working values 1348 * from downstream Codeaurora kernel 1349 */ 1350 usleep_range(2000, 2500); 1351 1352 ret = reset_control_deassert(res->rst); 1353 if (ret) { 1354 dev_err(dev, "reset deassert failed (%d)\n", ret); 1355 return ret; 1356 } 1357 1358 usleep_range(2000, 2500); 1359 1360 return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); 1361 } 1362 1363 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) 1364 { 1365 struct dw_pcie *pci = pcie->pci; 1366 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1367 u32 val; 1368 int i; 1369 1370 writel(SLV_ADDR_SPACE_SZ, 1371 pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); 1372 1373 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1374 val &= ~BIT(0); 1375 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1376 1377 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1378 1379 writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); 1380 writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, 1381 pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1382 writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | 1383 GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, 1384 pci->dbi_base + GEN3_RELATED_OFF); 1385 1386 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | 1387 SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1388 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1389 pcie->parf + PCIE20_PARF_SYS_CTRL); 1390 1391 writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); 1392 1393 dw_pcie_dbi_ro_wr_en(pci); 1394 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1395 1396 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1397 val &= ~PCI_EXP_LNKCAP_ASPMS; 1398 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1399 1400 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1401 PCI_EXP_DEVCTL2); 1402 1403 for (i = 0; i < 256; i++) 1404 writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i)); 1405 1406 return 0; 1407 } 1408 1409 static int qcom_pcie_link_up(struct dw_pcie *pci) 1410 { 1411 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1412 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1413 1414 return !!(val & PCI_EXP_LNKSTA_DLLLA); 1415 } 1416 1417 static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) 1418 { 1419 /* iommu map structure */ 1420 struct { 1421 u32 bdf; 1422 u32 phandle; 1423 u32 smmu_sid; 1424 u32 smmu_sid_len; 1425 } *map; 1426 void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N; 1427 struct device *dev = pcie->pci->dev; 1428 u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; 1429 int i, nr_map, size = 0; 1430 u32 smmu_sid_base; 1431 1432 of_get_property(dev->of_node, "iommu-map", &size); 1433 if (!size) 1434 return 0; 1435 1436 map = kzalloc(size, GFP_KERNEL); 1437 if (!map) 1438 return -ENOMEM; 1439 1440 of_property_read_u32_array(dev->of_node, 1441 "iommu-map", (u32 *)map, size / sizeof(u32)); 1442 1443 nr_map = size / (sizeof(*map)); 1444 1445 crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); 1446 1447 /* Registers need to be zero out first */ 1448 memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); 1449 1450 /* Extract the SMMU SID base from the first entry of iommu-map */ 1451 smmu_sid_base = map[0].smmu_sid; 1452 1453 /* Look for an available entry to hold the mapping */ 1454 for (i = 0; i < nr_map; i++) { 1455 __be16 bdf_be = cpu_to_be16(map[i].bdf); 1456 u32 val; 1457 u8 hash; 1458 1459 hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 1460 0); 1461 1462 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1463 1464 /* If the register is already populated, look for next available entry */ 1465 while (val) { 1466 u8 current_hash = hash++; 1467 u8 next_mask = 0xff; 1468 1469 /* If NEXT field is NULL then update it with next hash */ 1470 if (!(val & next_mask)) { 1471 val |= (u32)hash; 1472 writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); 1473 } 1474 1475 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1476 } 1477 1478 /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ 1479 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; 1480 writel(val, bdf_to_sid_base + hash * sizeof(u32)); 1481 } 1482 1483 kfree(map); 1484 1485 return 0; 1486 } 1487 1488 static int qcom_pcie_host_init(struct dw_pcie_rp *pp) 1489 { 1490 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1491 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1492 int ret; 1493 1494 qcom_ep_reset_assert(pcie); 1495 1496 ret = pcie->cfg->ops->init(pcie); 1497 if (ret) 1498 return ret; 1499 1500 ret = phy_power_on(pcie->phy); 1501 if (ret) 1502 goto err_deinit; 1503 1504 if (pcie->cfg->ops->post_init) { 1505 ret = pcie->cfg->ops->post_init(pcie); 1506 if (ret) 1507 goto err_disable_phy; 1508 } 1509 1510 qcom_ep_reset_deassert(pcie); 1511 1512 if (pcie->cfg->ops->config_sid) { 1513 ret = pcie->cfg->ops->config_sid(pcie); 1514 if (ret) 1515 goto err_assert_reset; 1516 } 1517 1518 return 0; 1519 1520 err_assert_reset: 1521 qcom_ep_reset_assert(pcie); 1522 err_disable_phy: 1523 phy_power_off(pcie->phy); 1524 err_deinit: 1525 pcie->cfg->ops->deinit(pcie); 1526 1527 return ret; 1528 } 1529 1530 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { 1531 .host_init = qcom_pcie_host_init, 1532 }; 1533 1534 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 1535 static const struct qcom_pcie_ops ops_2_1_0 = { 1536 .get_resources = qcom_pcie_get_resources_2_1_0, 1537 .init = qcom_pcie_init_2_1_0, 1538 .post_init = qcom_pcie_post_init_2_1_0, 1539 .deinit = qcom_pcie_deinit_2_1_0, 1540 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1541 }; 1542 1543 /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ 1544 static const struct qcom_pcie_ops ops_1_0_0 = { 1545 .get_resources = qcom_pcie_get_resources_1_0_0, 1546 .init = qcom_pcie_init_1_0_0, 1547 .post_init = qcom_pcie_post_init_1_0_0, 1548 .deinit = qcom_pcie_deinit_1_0_0, 1549 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1550 }; 1551 1552 /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ 1553 static const struct qcom_pcie_ops ops_2_3_2 = { 1554 .get_resources = qcom_pcie_get_resources_2_3_2, 1555 .init = qcom_pcie_init_2_3_2, 1556 .post_init = qcom_pcie_post_init_2_3_2, 1557 .deinit = qcom_pcie_deinit_2_3_2, 1558 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1559 }; 1560 1561 /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ 1562 static const struct qcom_pcie_ops ops_2_4_0 = { 1563 .get_resources = qcom_pcie_get_resources_2_4_0, 1564 .init = qcom_pcie_init_2_4_0, 1565 .post_init = qcom_pcie_post_init_2_4_0, 1566 .deinit = qcom_pcie_deinit_2_4_0, 1567 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1568 }; 1569 1570 /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ 1571 static const struct qcom_pcie_ops ops_2_3_3 = { 1572 .get_resources = qcom_pcie_get_resources_2_3_3, 1573 .init = qcom_pcie_init_2_3_3, 1574 .post_init = qcom_pcie_post_init_2_3_3, 1575 .deinit = qcom_pcie_deinit_2_3_3, 1576 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1577 }; 1578 1579 /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */ 1580 static const struct qcom_pcie_ops ops_2_7_0 = { 1581 .get_resources = qcom_pcie_get_resources_2_7_0, 1582 .init = qcom_pcie_init_2_7_0, 1583 .deinit = qcom_pcie_deinit_2_7_0, 1584 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1585 }; 1586 1587 /* Qcom IP rev.: 1.9.0 */ 1588 static const struct qcom_pcie_ops ops_1_9_0 = { 1589 .get_resources = qcom_pcie_get_resources_2_7_0, 1590 .init = qcom_pcie_init_2_7_0, 1591 .deinit = qcom_pcie_deinit_2_7_0, 1592 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1593 .config_sid = qcom_pcie_config_sid_sm8250, 1594 }; 1595 1596 /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ 1597 static const struct qcom_pcie_ops ops_2_9_0 = { 1598 .get_resources = qcom_pcie_get_resources_2_9_0, 1599 .init = qcom_pcie_init_2_9_0, 1600 .post_init = qcom_pcie_post_init_2_9_0, 1601 .deinit = qcom_pcie_deinit_2_9_0, 1602 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1603 }; 1604 1605 static const struct qcom_pcie_cfg cfg_1_0_0 = { 1606 .ops = &ops_1_0_0, 1607 }; 1608 1609 static const struct qcom_pcie_cfg cfg_1_9_0 = { 1610 .ops = &ops_1_9_0, 1611 }; 1612 1613 static const struct qcom_pcie_cfg cfg_2_1_0 = { 1614 .ops = &ops_2_1_0, 1615 }; 1616 1617 static const struct qcom_pcie_cfg cfg_2_3_2 = { 1618 .ops = &ops_2_3_2, 1619 }; 1620 1621 static const struct qcom_pcie_cfg cfg_2_3_3 = { 1622 .ops = &ops_2_3_3, 1623 }; 1624 1625 static const struct qcom_pcie_cfg cfg_2_4_0 = { 1626 .ops = &ops_2_4_0, 1627 }; 1628 1629 static const struct qcom_pcie_cfg cfg_2_7_0 = { 1630 .ops = &ops_2_7_0, 1631 }; 1632 1633 static const struct qcom_pcie_cfg cfg_2_9_0 = { 1634 .ops = &ops_2_9_0, 1635 }; 1636 1637 static const struct dw_pcie_ops dw_pcie_ops = { 1638 .link_up = qcom_pcie_link_up, 1639 .start_link = qcom_pcie_start_link, 1640 }; 1641 1642 static int qcom_pcie_probe(struct platform_device *pdev) 1643 { 1644 struct device *dev = &pdev->dev; 1645 struct dw_pcie_rp *pp; 1646 struct dw_pcie *pci; 1647 struct qcom_pcie *pcie; 1648 const struct qcom_pcie_cfg *pcie_cfg; 1649 int ret; 1650 1651 pcie_cfg = of_device_get_match_data(dev); 1652 if (!pcie_cfg || !pcie_cfg->ops) { 1653 dev_err(dev, "Invalid platform data\n"); 1654 return -EINVAL; 1655 } 1656 1657 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 1658 if (!pcie) 1659 return -ENOMEM; 1660 1661 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1662 if (!pci) 1663 return -ENOMEM; 1664 1665 pm_runtime_enable(dev); 1666 ret = pm_runtime_get_sync(dev); 1667 if (ret < 0) 1668 goto err_pm_runtime_put; 1669 1670 pci->dev = dev; 1671 pci->ops = &dw_pcie_ops; 1672 pp = &pci->pp; 1673 1674 pcie->pci = pci; 1675 1676 pcie->cfg = pcie_cfg; 1677 1678 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); 1679 if (IS_ERR(pcie->reset)) { 1680 ret = PTR_ERR(pcie->reset); 1681 goto err_pm_runtime_put; 1682 } 1683 1684 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); 1685 if (IS_ERR(pcie->parf)) { 1686 ret = PTR_ERR(pcie->parf); 1687 goto err_pm_runtime_put; 1688 } 1689 1690 pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); 1691 if (IS_ERR(pcie->elbi)) { 1692 ret = PTR_ERR(pcie->elbi); 1693 goto err_pm_runtime_put; 1694 } 1695 1696 pcie->phy = devm_phy_optional_get(dev, "pciephy"); 1697 if (IS_ERR(pcie->phy)) { 1698 ret = PTR_ERR(pcie->phy); 1699 goto err_pm_runtime_put; 1700 } 1701 1702 ret = pcie->cfg->ops->get_resources(pcie); 1703 if (ret) 1704 goto err_pm_runtime_put; 1705 1706 pp->ops = &qcom_pcie_dw_ops; 1707 1708 ret = phy_init(pcie->phy); 1709 if (ret) 1710 goto err_pm_runtime_put; 1711 1712 platform_set_drvdata(pdev, pcie); 1713 1714 ret = dw_pcie_host_init(pp); 1715 if (ret) { 1716 dev_err(dev, "cannot initialize host\n"); 1717 goto err_phy_exit; 1718 } 1719 1720 return 0; 1721 1722 err_phy_exit: 1723 phy_exit(pcie->phy); 1724 err_pm_runtime_put: 1725 pm_runtime_put(dev); 1726 pm_runtime_disable(dev); 1727 1728 return ret; 1729 } 1730 1731 static const struct of_device_id qcom_pcie_match[] = { 1732 { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, 1733 { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, 1734 { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, 1735 { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, 1736 { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, 1737 { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, 1738 { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, 1739 { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, 1740 { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, 1741 { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, 1742 { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, 1743 { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, 1744 { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, 1745 { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, 1746 { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, 1747 { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 }, 1748 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, 1749 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, 1750 { } 1751 }; 1752 1753 static void qcom_fixup_class(struct pci_dev *dev) 1754 { 1755 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; 1756 } 1757 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); 1758 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); 1759 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); 1760 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); 1761 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); 1762 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 1763 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); 1764 1765 static struct platform_driver qcom_pcie_driver = { 1766 .probe = qcom_pcie_probe, 1767 .driver = { 1768 .name = "qcom-pcie", 1769 .suppress_bind_attrs = true, 1770 .of_match_table = qcom_pcie_match, 1771 }, 1772 }; 1773 builtin_platform_driver(qcom_pcie_driver); 1774