// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe Endpoint controller driver
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
 *
 * Copyright (c) 2021, Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "pcie-designware.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_DB_CTRL				0x10
#define PARF_PM_CTRL				0x20
#define PARF_MHI_BASE_ADDR_LOWER		0x178
#define PARF_MHI_BASE_ADDR_UPPER		0x17c
#define PARF_DEBUG_INT_EN			0x190
#define PARF_AXI_MSTR_RD_HALT_NO_WRITES		0x1a4
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_CFG_BITS				0x210
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SLV_ADDR_MSB_CTRL			0x2c0
#define PARF_DBI_BASE_ADDR			0x350
#define PARF_DBI_BASE_ADDR_HI			0x354
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI		0x35c
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_SRIS_MODE				0x644
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN			BIT(1)
#define PARF_INT_ALL_BME			BIT(2)
#define PARF_INT_ALL_PM_TURNOFF			BIT(3)
#define PARF_INT_ALL_DEBUG			BIT(4)
#define PARF_INT_ALL_LTR			BIT(5)
#define PARF_INT_ALL_MHI_Q6			BIT(6)
#define PARF_INT_ALL_MHI_A7			BIT(7)
#define PARF_INT_ALL_DSTATE_CHANGE		BIT(8)
#define PARF_INT_ALL_L1SUB_TIMEOUT		BIT(9)
#define PARF_INT_ALL_MMIO_WRITE			BIT(10)
#define PARF_INT_ALL_CFG_WRITE			BIT(11)
#define PARF_INT_ALL_BRIDGE_FLUSH_N		BIT(12)
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_ALL_AER_LEGACY			BIT(14)
#define PARF_INT_ALL_PLS_ERR			BIT(15)
#define PARF_INT_ALL_PME_LEGACY			BIT(16)
#define PARF_INT_ALL_PLS_PME			BIT(17)

/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS			BIT(0)

/* PARF_DEBUG_INT_EN register fields */
#define PARF_DEBUG_INT_PM_DSTATE_CHANGE		BIT(1)
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN	BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF		BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP			0x0

/* PARF_PM_CTRL register fields */
#define PARF_PM_CTRL_REQ_EXIT_L1		BIT(1)
#define PARF_PM_CTRL_READY_ENTR_L23		BIT(2)
#define PARF_PM_CTRL_REQ_NOT_ENTR_L1		BIT(5)

/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN	BIT(0)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define PARF_AXI_MSTR_WR_ADDR_HALT_EN		BIT(31)

/* PARF_Q2A_FLUSH register fields */
#define PARF_Q2A_FLUSH_EN			BIT(16)

/* PARF_SYS_CTRL register fields */
#define PARF_SYS_CTRL_AUX_PWR_DET		BIT(4)
#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS		BIT(6)
#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE	BIT(11)

/* PARF_DB_CTRL register fields */
#define PARF_DB_CTRL_INSR_DBNCR_BLOCK		BIT(0)
#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK		BIT(1)
#define PARF_DB_CTRL_DBI_WKP_BLOCK		BIT(4)
#define PARF_DB_CTRL_SLV_WKP_BLOCK		BIT(5)
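/**
 * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
 * @pci: DesignWare PCIe controller struct
 * @parf: Qualcomm PCIe specific PARF register base
 * @elbi: DesignWare PCIe specific ELBI register base
 * @perst_map: TCSR syscon regmap used for PERST configuration
 * @mmio_res: MMIO region resource exposed to the host via BAR
 * @core_reset: PCIe Endpoint core reset
 * @reset: PERST# GPIO
 * @wake: WAKE# GPIO
 * @phy: PHY handle
 * @perst_en: TCSR offset of the PERST enable register
 * @perst_sep_en: TCSR offset of the PERST separation enable register
 * @link_status: PCIe Link status
 * @global_irq: Qualcomm PCIe specific Global IRQ
 * @perst_irq: PERST# IRQ
 */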
#define PARF_DB_CTRL_MST_WKP_BLOCK		BIT(6)

/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN	BIT(1)

/* ELBI registers */
#define ELBI_SYS_STTS				0x08

/* DBI registers */
#define DBI_CON_STATUS				0x44

/* DBI register fields */
#define DBI_CON_STATUS_POWER_STATE_MASK		GENMASK(1, 0)

#define XMLH_LINK_UP				0x400
#define CORE_RESET_TIME_US_MIN			1000
#define CORE_RESET_TIME_US_MAX			1005
#define WAKE_DELAY_US				2000 /* 2 ms */

#define to_pcie_ep(x)				dev_get_drvdata((x)->dev)

enum qcom_pcie_ep_link_status {
	QCOM_PCIE_EP_LINK_DISABLED,
	QCOM_PCIE_EP_LINK_ENABLED,
	QCOM_PCIE_EP_LINK_UP,
	QCOM_PCIE_EP_LINK_DOWN,
};

static struct clk_bulk_data qcom_pcie_ep_clks[] = {
	{ .id = "cfg" },
	{ .id = "aux" },
	{ .id = "bus_master" },
	{ .id = "bus_slave" },
	{ .id = "ref" },
	{ .id = "sleep" },
	{ .id = "slave_q2a" },
};

struct qcom_pcie_ep {
	struct dw_pcie pci;

	void __iomem *parf;
	void __iomem *elbi;
	struct regmap *perst_map;
	struct resource *mmio_res;

	struct reset_control *core_reset;
	struct gpio_desc *reset;
	struct gpio_desc *wake;
	struct phy *phy;

	u32 perst_en;
	u32 perst_sep_en;

	enum qcom_pcie_ep_link_status link_status;
	int global_irq;
	int perst_irq;
};

static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
{
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_assert(pcie_ep->core_reset);
	if (ret) {
		dev_err(dev, "Cannot assert core reset\n");
		return ret;
	}

	usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);

	ret = reset_control_deassert(pcie_ep->core_reset);
	if (ret) {
		dev_err(dev, "Cannot de-assert core reset\n");
		return ret;
	}

	usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);

	return 0;
}

/*
 * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
 * device reset during host reboot and hibernation. The driver is
 * expected to handle this situation.
 */
static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
{
	regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
	regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
}

static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	u32 reg;

	reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);

	return reg & XMLH_LINK_UP;
}

static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);

	enable_irq(pcie_ep->perst_irq);

	return 0;
}

static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);

	disable_irq(pcie_ep->perst_irq);
}
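/*
 * Bring-up sequence run when the host deasserts PERST#: enable clocks,
 * pulse the core reset, power up the PHY, toggle WAKE#, program the PARF
 * registers for endpoint operation and finally enable the LTSSM so link
 * training can start. The teardown counterpart is qcom_pcie_perst_assert().
 */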
static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	struct device *dev = pci->dev;
	u32 val, offset;
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
				      qcom_pcie_ep_clks);
	if (ret)
		return ret;

	ret = qcom_pcie_ep_core_reset(pcie_ep);
	if (ret)
		goto err_disable_clk;

	ret = phy_init(pcie_ep->phy);
	if (ret)
		goto err_disable_clk;

	ret = phy_power_on(pcie_ep->phy);
	if (ret)
		goto err_phy_exit;

	/* Assert WAKE# to RC to indicate device is ready */
	gpiod_set_value_cansleep(pcie_ep->wake, 1);
	usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
	gpiod_set_value_cansleep(pcie_ep->wake, 0);

	qcom_pcie_ep_configure_tcsr(pcie_ep);

	/* Disable BDF to SID mapping */
	val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
	val |= PARF_BDF_TO_SID_BYPASS;
	writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);

	/* Enable debug IRQ */
	val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
	val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
	       PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
	       PARF_DEBUG_INT_PM_DSTATE_CHANGE;
	writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);

	/* Configure PCIe to endpoint mode */
	writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);

	/* Allow entering L1 state */
	val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
	val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
	writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);

	/* Read halts write */
	val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
	val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);

	/* Write after write halt */
	val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

	/* Q2A flush disable */
	val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
	val &= ~PARF_Q2A_FLUSH_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);

	/* Disable DBI Wakeup, core clock CGC and enable AUX power */
	val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
	val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
	       PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
	       PARF_SYS_CTRL_AUX_PWR_DET;
	writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);

	/* Disable the debouncers */
	val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
	val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
	       PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
	       PARF_DB_CTRL_MST_WKP_BLOCK;
	writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);

	/* Request to exit from L1SS for MSI and LTR MSG */
	val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
	val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
	writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);

	dw_pcie_dbi_ro_wr_en(pci);

	/* Set the L0s Exit Latency to 2us-4us = 0x6 */
	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_L0SEL;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

	/* Set the L1 Exit Latency to 32us-64us = 0x6 */
	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_L1EL;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
	val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
	      PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
	      PARF_INT_ALL_LINK_UP;
	writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);

	ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto err_phy_power_off;
	}

	/*
	 * The physical address of the MMIO region which is exposed as the BAR
	 * should be written to MHI BASE registers.
	 */
	writel_relaxed(pcie_ep->mmio_res->start,
		       pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
	writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);

	dw_pcie_ep_init_notify(&pcie_ep->pci.ep);

	/* Enable LTSSM */
	val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
	val |= BIT(8);
	writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);

	return 0;

err_phy_power_off:
	phy_power_off(pcie_ep->phy);
err_phy_exit:
	phy_exit(pcie_ep->phy);
err_disable_clk:
	clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
				   qcom_pcie_ep_clks);

	return ret;
}
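/*
 * Called when the host asserts PERST#: power down the PHY and clocks and
 * mark the link disabled. Everything is reprogrammed from scratch in
 * qcom_pcie_perst_deassert() on the next deassertion.
 */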
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
	struct device *dev = pci->dev;

	if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
		dev_dbg(dev, "Link is already disabled\n");
		return;
	}

	phy_power_off(pcie_ep->phy);
	phy_exit(pcie_ep->phy);
	clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
				   qcom_pcie_ep_clks);
	pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}

/* Common DWC controller ops */
static const struct dw_pcie_ops pci_ops = {
	.link_up = qcom_pcie_dw_link_up,
	.start_link = qcom_pcie_dw_start_link,
	.stop_link = qcom_pcie_dw_stop_link,
};

static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
					 struct qcom_pcie_ep *pcie_ep)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device_node *syscon;
	struct resource *res;
	int ret;

	pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie_ep->parf))
		return PTR_ERR(pcie_ep->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);
	pci->dbi_base2 = pci->dbi_base;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie_ep->elbi))
		return PTR_ERR(pcie_ep->elbi);

	pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							 "mmio");
	if (!pcie_ep->mmio_res) {
		/* Dereferenced later when programming the MHI BASE registers */
		dev_err(dev, "Failed to get mmio resource\n");
		return -EINVAL;
	}

	syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
	if (!syscon) {
		dev_err(dev, "Failed to parse qcom,perst-regs\n");
		return -EINVAL;
	}

	pcie_ep->perst_map = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(pcie_ep->perst_map))
		return PTR_ERR(pcie_ep->perst_map);

	ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
					 1, &pcie_ep->perst_en);
	if (ret < 0) {
		dev_err(dev, "No Perst Enable offset in syscon\n");
		return ret;
	}

	ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
					 2, &pcie_ep->perst_sep_en);
	if (ret < 0) {
		dev_err(dev, "No Perst Separation Enable offset in syscon\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
				      struct qcom_pcie_ep *pcie_ep)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
	if (ret) {
		dev_err(dev, "Failed to get io resources %d\n", ret);
		return ret;
	}

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
				qcom_pcie_ep_clks);
	if (ret)
		return ret;

	pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
	if (IS_ERR(pcie_ep->core_reset))
		return PTR_ERR(pcie_ep->core_reset);

	pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie_ep->reset))
		return PTR_ERR(pcie_ep->reset);

	pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
	if (IS_ERR(pcie_ep->wake))
		return PTR_ERR(pcie_ep->wake);

	pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie_ep->phy))
		ret = PTR_ERR(pcie_ep->phy);

	return ret;
}
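/*
 * Threaded handler for the "global" IRQ, which aggregates PARF events such
 * as link up/down, Bus Master Enable, PM Turn-off and D-state changes. The
 * raw status is cleared unconditionally; only events still enabled in
 * PARF_INT_ALL_MASK are acted upon, one per invocation.
 */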
/* TODO: Notify clients about PCIe state change */
static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
{
	struct qcom_pcie_ep *pcie_ep = data;
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device *dev = pci->dev;
	u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
	u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
	u32 dstate, val;

	writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
	status &= mask;

	if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
		dev_dbg(dev, "Received Linkdown event\n");
		pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
	} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
		dev_dbg(dev, "Received BME event. Link is enabled!\n");
		pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
	} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
		dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
		val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
		val |= PARF_PM_CTRL_READY_ENTR_L23;
		writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
	} else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
		dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
					   DBI_CON_STATUS_POWER_STATE_MASK;
		dev_dbg(dev, "Received D%d state event\n", dstate);
		if (dstate == 3) {
			val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
			val |= PARF_PM_CTRL_REQ_EXIT_L1;
			writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
		}
	} else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
		dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
		dw_pcie_ep_linkup(&pci->ep);
		pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
	} else {
		dev_dbg(dev, "Received unknown event: %d\n", status);
	}

	return IRQ_HANDLED;
}
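/*
 * PERST# is monitored as a level-triggered IRQ: after handling each event,
 * the trigger polarity is flipped (HIGH while waiting for assertion, LOW
 * while waiting for deassertion) so the opposite transition is caught next.
 */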
static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
{
	struct qcom_pcie_ep *pcie_ep = data;
	struct dw_pcie *pci = &pcie_ep->pci;
	struct device *dev = pci->dev;
	u32 perst;

	perst = gpiod_get_value(pcie_ep->reset);
	if (perst) {
		dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
		qcom_pcie_perst_assert(pci);
	} else {
		dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
		qcom_pcie_perst_deassert(pci);
	}

	irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
			 (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));

	return IRQ_HANDLED;
}

static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
					     struct qcom_pcie_ep *pcie_ep)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, "global");
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get Global IRQ\n");
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					qcom_pcie_ep_global_irq_thread,
					IRQF_ONESHOT,
					"global_irq", pcie_ep);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request Global IRQ\n");
		return ret;
	}

	pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
	irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
	ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
					qcom_pcie_ep_perst_irq_thread,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"perst_irq", pcie_ep);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
		disable_irq(irq);
		return ret;
	}

	return 0;
}

static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				  enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
	case PCI_EPC_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EINVAL;
	}
}

static const struct pci_epc_features qcom_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features *
qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
{
	return &qcom_pcie_epc_features;
}
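/*
 * Core-init callback: reset all BARs so that endpoint function drivers
 * start from a clean slate and only expose the BARs they configure.
 */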
static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}

static const struct dw_pcie_ep_ops pci_ep_ops = {
	.ep_init = qcom_pcie_ep_init,
	.raise_irq = qcom_pcie_ep_raise_irq,
	.get_features = qcom_pcie_epc_get_features,
};

static int qcom_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qcom_pcie_ep *pcie_ep;
	int ret;

	pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
	if (!pcie_ep)
		return -ENOMEM;

	pcie_ep->pci.dev = dev;
	pcie_ep->pci.ops = &pci_ops;
	pcie_ep->pci.ep.ops = &pci_ep_ops;
	platform_set_drvdata(pdev, pcie_ep);

	ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
				      qcom_pcie_ep_clks);
	if (ret)
		return ret;

	ret = qcom_pcie_ep_core_reset(pcie_ep);
	if (ret)
		goto err_disable_clk;

	ret = phy_init(pcie_ep->phy);
	if (ret)
		goto err_disable_clk;

	/* PHY needs to be powered on for dw_pcie_ep_init() */
	ret = phy_power_on(pcie_ep->phy);
	if (ret)
		goto err_phy_exit;

	ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
	if (ret) {
		dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
		goto err_phy_power_off;
	}

	ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
	if (ret)
		goto err_phy_power_off;

	return 0;

err_phy_power_off:
	phy_power_off(pcie_ep->phy);
err_phy_exit:
	phy_exit(pcie_ep->phy);
err_disable_clk:
	clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
				   qcom_pcie_ep_clks);

	return ret;
}

static int qcom_pcie_ep_remove(struct platform_device *pdev)
{
	struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);

	if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
		return 0;

	phy_power_off(pcie_ep->phy);
	phy_exit(pcie_ep->phy);
	clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
				   qcom_pcie_ep_clks);

	return 0;
}

static const struct of_device_id qcom_pcie_ep_match[] = {
	{ .compatible = "qcom,sdx55-pcie-ep", },
	{ }
};

static struct platform_driver qcom_pcie_ep_driver = {
	.probe	= qcom_pcie_ep_probe,
	.remove	= qcom_pcie_ep_remove,
	.driver	= {
		.name = "qcom-pcie-ep",
		.of_match_table	= qcom_pcie_ep_match,
	},
};
builtin_platform_driver(qcom_pcie_ep_driver);

MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
MODULE_LICENSE("GPL v2");