// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra194 SoC
 *
 * Copyright (C) 2019 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88
#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF		0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT		(19)

#define EVENT_COUNTER_ALL_CLEAR			0x3
#define EVENT_COUNTER_ENABLE_ALL		0x7
#define EVENT_COUNTER_ENABLE_SHIFT		2
#define EVENT_COUNTER_EVENT_SEL_MASK		GENMASK(7, 0)
#define EVENT_COUNTER_EVENT_SEL_SHIFT		16
#define EVENT_COUNTER_EVENT_Tx_L0S		0x2
#define EVENT_COUNTER_EVENT_Rx_L0S		0x3
#define EVENT_COUNTER_EVENT_L1			0x5
#define EVENT_COUNTER_EVENT_L1_1		0x7
#define EVENT_COUNTER_EVENT_L1_2		0x8
#define EVENT_COUNTER_GROUP_SEL_SHIFT		24
#define EVENT_COUNTER_GROUP_5			0x5

#define N_FTS_VAL				52
#define FTS_VAL					52

#define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define GEN3_RELATED_OFF			0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1
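/*
 * The core clock table below is indexed by (link speed - 1); the event
 * counter tables are indexed by the controller ID read from the second
 * cell of the "nvidia,bpmp" DT property (stored in pcie->cid).
 */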
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};

static const u32 event_cntr_ctrl_offset[] = {
	0x1d8,
	0x1a8,
	0x1a8,
	0x1a8,
	0x1c4,
	0x1d8
};

static const u32 event_cntr_data_offset[] = {
	0x1dc,
	0x1ac,
	0x1ac,
	0x1ac,
	0x1c8,
	0x1dc
};

struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool link_state;
	bool update_fc_fixup;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
};

struct tegra_pcie_dw_of_data {
	enum dw_pcie_device_mode mode;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};
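/*
 * If the link has retrained to a narrower width than it initially came up
 * with, treat the link as bad: cap the target link speed at 2.5 GT/s and
 * request retraining. Called from the bandwidth management interrupt.
 */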
static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link is not stable
	 * anyway, don't wait to confirm whether the link is really
	 * transitioning to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, tmp;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (val & APPL_INTR_STATUS_L0_INT_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
	}

	return IRQ_HANDLED;
}
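/*
 * A host-initiated hot reset leaves stale interrupt status behind and takes
 * the LTSSM down; clear every level-1 status bank and re-enable the LTSSM
 * so the link can retrain.
 */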
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}
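/*
 * Hard-IRQ half of the endpoint interrupt: ack what can be handled in
 * atomic context and return IRQ_WAKE_THREAD on a Bus Master Enable change
 * so the threaded handler above can sleep while sending the LTR message.
 */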
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 val, tmp;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

		if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			tmp = appl_readl(pcie, APPL_LINK_STATUS);
			if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);

		if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Spurious interrupt (STATUS = 0x%08X)\n",
			 val);
		appl_writel(pcie, val, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	/*
	 * This endpoint-mode-specific register happens to be present even
	 * when the controller operates in Root Port mode, and the system
	 * hangs if it is accessed while the link is in the ASPM-L1 state.
	 * So skip accessing it altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	/*
	 * This endpoint-mode-specific register happens to be present even
	 * when the controller operates in Root Port mode, and the system
	 * hangs if it is accessed while the link is in the ASPM-L1 state.
	 * So skip accessing it altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};
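/*
 * ASPM statistics and tuning, compiled only with CONFIG_PCIEASPM: the
 * helpers below program the DWC event counters that back the
 * "aspm_state_cnt" debugfs file, while the #else stubs keep the rest of
 * the driver independent of the config option.
 */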
#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);

	return val;
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);

	return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
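/*
 * Interrupt enables are hierarchical: a level-1 enable (APPL_INTR_EN_L1_*)
 * only reaches the CPU if the corresponding top-level bit in
 * APPL_INTR_EN_L0_0 is also set, which is why the helpers below always
 * program both levels.
 */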
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	dw_pcie_msi_init(pp);

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}
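/*
 * Program lane equalization presets: per-lane transmitter presets for
 * 8 GT/s (SPCIE capability) and 16 GT/s (Physical Layer 16.0 GT/s
 * capability), then the preset request vectors - 0x3ff while the rate
 * shadow register selects the 8 GT/s rate and 0x360 for 16 GT/s.
 */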
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
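/*
 * Host-mode bring-up sequence: program the DBI registers, assert PERST#,
 * enable the LTSSM, release PERST#, and give the link 100 ms to train
 * before the caller polls for link-up.
 */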
static void tegra_pcie_prepare_host(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable 0xFFFF0001 read response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	dw_pcie_setup_rc(pp);

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);
}

static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val, tmp, offset, speed;

	pp->bridge->ops = &tegra_pci_ops;

	tegra_pcie_prepare_host(pp);

	if (dw_pcie_wait_for_link(pci)) {
		/*
		 * Some endpoints cannot get the link up if the Root Port
		 * has the Data Link Feature (DLF) enabled. Refer to PCIe
		 * spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more
		 * information on Scaled Flow Control and DLF. So, confirm
		 * that this is indeed the case here and attempt link-up
		 * once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL\n");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		/* Write back to the DLF capability register that was read */
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_prepare_host(pp);

		if (dw_pcie_wait_for_link(pci))
			return 0;
	}

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
{
	pp->num_vectors = MAX_MSI_IRQS;
}
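/*
 * In endpoint mode the link is brought up from the PERST# threaded IRQ, so
 * "starting" the link just arms that interrupt and "stopping" it disarms it.
 */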
static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	enable_irq(pcie->pex_rst_irq);

	return 0;
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.host_init = tegra_pcie_dw_host_init,
	.set_num_vectors = tegra_pcie_set_msi_vec_num,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}
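/*
 * Illustrative (not exhaustive) DT fragment showing the properties parsed
 * below; the node name, unit address, and values are examples only:
 *
 *	pcie@14180000 {
 *		num-lanes = <8>;
 *		nvidia,bpmp = <&bpmp 2>;	// second cell = controller ID
 *		nvidia,aspm-cmrt-us = <60>;
 *		nvidia,aspm-pwr-on-t-us = <20>;
 *		nvidia,aspm-l0s-entrance-latency-us = <3>;
 *		phys = <&p2u_hsio_2>, <&p2u_hsio_3>;
 *		phy-names = "p2u-0", "p2u-1";
 *		supports-clkreq;
 *	};
 */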
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct device_node *np = pcie->dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/* Controller-5 doesn't need to have its state set by BPMP-FW */
	if (pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * The link doesn't go into the L2 state with some endpoints on Tegra
	 * if they are not in the D0 state. So, make sure that the immediate
	 * downstream devices are in the D0 state before sending PME_TurnOff
	 * to put the link into the L2 state.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}

static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}

static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}

static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}
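/*
 * Power-up order: BPMP controller-state handshake, slot and pad regulators,
 * core clock, core APB reset release, then PHYs; the core reset is released
 * last, once the application registers are programmed. The failure path
 * unwinds in exactly the reverse order.
 */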
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}

static int __deinit_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
			ret);
		return ret;
	}

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
		return ret;
	}

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret) {
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
		return ret;
	}

	tegra_pcie_disable_slot_regulators(pcie);

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret) {
		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
			pcie->cid, ret);
		return ret;
	}

	return ret;
}
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	return __deinit_controller(pcie);
}

static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
					 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
					 1, PME_ACK_TIMEOUT);
}
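/*
 * PME_Turn_Off handshake: tegra_pcie_try_link_l2() above requests the
 * message and polls APPL_DEBUG for the L2 latch; if the link refuses to
 * enter L2, the fallback below asserts PERST# and forces the LTSSM back
 * into the detect state before REFCLK is cut.
 */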
static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up\n");
		return;
	}

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * The TX lane clock frequency resets to Gen-1 only if the
		 * link is in the L2 or detect state. So assert PEX_RST to
		 * the endpoint to force the Root Port into the detect state.
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						APPL_DEBUG_LTSSM_STATE_MASK) >>
						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err) {
			dev_info(pcie->dev, "Link didn't go to detect state\n");
		} else {
			/* Disable LTSSM after link is in detect state */
			data = appl_readl(pcie, APPL_CTRL);
			data &= ~APPL_CTRL_LTSSM_EN;
			appl_writel(pcie, data, APPL_CTRL);
		}
	}
	/*
	 * DBI registers may not be accessible after this point because PLL-E
	 * may be down, depending on how CLKREQ is pulled by the endpoint.
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}

static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);

	return __deinit_controller(pcie);
}

static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
		if (!pp->msi_irq) {
			dev_err(dev, "Failed to get MSI interrupt\n");
			return -ENODEV;
		}
	}

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0)
		goto fail_pm_get_sync;

	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
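/*
 * Endpoint-mode PERST# state machine: the threaded GPIO IRQ further below
 * calls the assert handler to tear the controller down and the deassert
 * handler to bring it up, tracked through pcie->ep_state.
 */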
static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				 LTSSM_STATE_PRE_DETECT,
				 1, LTSSM_TIMEOUT);
	if (ret)
		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);

	reset_control_assert(pcie->core_rst);

	tegra_pcie_disable_phy(pcie);

	reset_control_assert(pcie->core_apb_rst);

	clk_disable_unprepare(pcie->core_clk);

	pm_runtime_put_sync(pcie->dev);

	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);

	pcie->ep_state = EP_STATE_DISABLED;
	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}

static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
	if (ret) {
		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
		goto fail_pll_init;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* Configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);
	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	/* The HIGH register takes the upper 32 bits of the match address */
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	pm_runtime_put_sync(dev);
}

static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}
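/*
 * IRQ raise helpers: INTx is pulsed through APPL_LEGACY_INTX (Tegra194
 * supports INTA only), MSI through APPL_MSI_CTRL_1, and MSI-X by writing
 * the vector number to the controller's own address-match window that was
 * programmed with ep->msi_mem_phys above.
 */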
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}
}

static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};

static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	struct resource *res;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);
	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PCIe EP work thread string\n");
		return -ENOMEM;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		return ret;
	}

	return 0;
}

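/*
 * Probe acquires all SoC-side resources in a fixed order (device tree
 * data, slot regulators, the "vddio-pex-ctl" supply, the "core" clock,
 * the "appl" region, the "apb" and "core" resets, the P2U PHYs, the
 * "dbi" and "atu_dma" regions, and a BPMP handle) and only then
 * branches on the operating mode to set up either the Root Port or the
 * Endpoint flavour of the controller.
 */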
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
	struct resource *dbi_res;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct phy **phys;
	char *name;
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = &pdev->dev;
	pci->ops = &tegra_dw_pcie_ops;
	pci->n_fts[0] = N_FTS_VAL;
	pci->n_fts[1] = FTS_VAL;

	pp = &pci->pp;
	pcie->dev = &pdev->dev;
	pcie->mode = (enum dw_pcie_device_mode)data->mode;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get regulator: %ld\n",
				PTR_ERR(pcie->pex_ctl_supply));
		return ret;
	}

	pcie->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(pcie->core_clk)) {
		dev_err(dev, "Failed to get core clock: %ld\n",
			PTR_ERR(pcie->core_clk));
		return PTR_ERR(pcie->core_clk);
	}

	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "appl");
	if (!pcie->appl_res) {
		dev_err(dev, "Failed to find \"appl\" region\n");
		return -ENODEV;
	}

	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
	if (IS_ERR(pcie->appl_base))
		return PTR_ERR(pcie->appl_base);

	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(pcie->core_apb_rst)) {
		dev_err(dev, "Failed to get APB reset: %ld\n",
			PTR_ERR(pcie->core_apb_rst));
		return PTR_ERR(pcie->core_apb_rst);
	}

	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	for (i = 0; i < pcie->phy_count; i++) {
		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
		if (!name) {
			dev_err(dev, "Failed to create P2U string\n");
			return -ENOMEM;
		}
		phys[i] = devm_phy_get(dev, name);
		kfree(name);
		if (IS_ERR(phys[i])) {
			ret = PTR_ERR(phys[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get PHY: %d\n", ret);
			return ret;
		}
	}

	pcie->phys = phys;

	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!dbi_res) {
		dev_err(dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}
	pcie->dbi_res = dbi_res;

	pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Tegra HW locates DBI2 at a fixed offset from DBI */
	pci->dbi_base2 = pci->dbi_base + 0x1000;

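	/*
	 * The "atu_dma" region mapped next holds the iATU (and, as the
	 * name suggests, DMA) register space; pci->atu_base is what the
	 * DWC core uses to program outbound/inbound address translation.
	 */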
	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "atu_dma");
	if (!atu_dma_res) {
		dev_err(dev, "Failed to find \"atu_dma\" region\n");
		return -ENODEV;
	}
	pcie->atu_dma_res = atu_dma_res;

	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);

	pcie->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(pcie->core_rst)) {
		dev_err(dev, "Failed to get core reset: %ld\n",
			PTR_ERR(pcie->core_rst));
		return PTR_ERR(pcie->core_rst);
	}

	pp->irq = platform_get_irq_byname(pdev, "intr");
	if (pp->irq < 0)
		return pp->irq;

	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

	switch (pcie->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;

		return 0;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;

		/*
		 * Return on success; falling through to the fail label
		 * here would drop the BPMP reference the endpoint still
		 * needs.
		 */
		return 0;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
		ret = -EINVAL;
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
	return ret;
}

static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (!pcie->link_state)
		return 0;

	debugfs_remove_recursive(pcie->debugfs);
	tegra_pcie_deinit_controller(pcie);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);

	return 0;
}

static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	return 0;
}

static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (!pcie->link_state)
		return 0;

	/* Save MSI interrupt vector */
	pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
					       PORT_LOGIC_MSI_CTRL_INT_0_EN);
	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra_pcie_dw_pme_turnoff(pcie);

	return __deinit_controller(pcie);
}

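/*
 * The resume path below mirrors suspend_noirq in reverse: the controller
 * is re-configured and the host re-initialized before the MSI vector
 * saved in tegra_pcie_dw_suspend_noirq() is written back, so MSIs cannot
 * be delivered against an unprogrammed PORT_LOGIC_MSI_CTRL_INT_0_EN.
 */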
static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	/* Restore MSI interrupt vector */
	dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
			   pcie->msi_ctrl_int);

	return 0;

fail_host_init:
	return __deinit_controller(pcie);
}

static int tegra_pcie_dw_resume_early(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (!pcie->link_state)
		return 0;

	/* Disable HW_HOT_RST mode */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
	val &= ~APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	return 0;
}

static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (!pcie->link_state)
		return;

	debugfs_remove_recursive(pcie->debugfs);
	tegra_pcie_downstream_dev_to_D0(pcie);

	disable_irq(pcie->pci.pp.irq);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		disable_irq(pcie->pci.pp.msi_irq);

	tegra_pcie_dw_pme_turnoff(pcie);
	__deinit_controller(pcie);
}

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra_pcie_dw_ep_of_data,
	},
	{},
};

static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};

static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name = "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");