1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * PCIe host controller driver for the following SoCs 4 * Tegra194 5 * Tegra234 6 * 7 * Copyright (C) 2019-2022 NVIDIA Corporation. 8 * 9 * Author: Vidya Sagar <vidyas@nvidia.com> 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/debugfs.h> 14 #include <linux/delay.h> 15 #include <linux/gpio.h> 16 #include <linux/gpio/consumer.h> 17 #include <linux/interrupt.h> 18 #include <linux/iopoll.h> 19 #include <linux/kernel.h> 20 #include <linux/module.h> 21 #include <linux/of.h> 22 #include <linux/of_device.h> 23 #include <linux/of_gpio.h> 24 #include <linux/of_pci.h> 25 #include <linux/pci.h> 26 #include <linux/phy/phy.h> 27 #include <linux/pinctrl/consumer.h> 28 #include <linux/platform_device.h> 29 #include <linux/pm_runtime.h> 30 #include <linux/random.h> 31 #include <linux/reset.h> 32 #include <linux/resource.h> 33 #include <linux/types.h> 34 #include "pcie-designware.h" 35 #include <soc/tegra/bpmp.h> 36 #include <soc/tegra/bpmp-abi.h> 37 #include "../../pci.h" 38 39 #define TEGRA194_DWC_IP_VER 0x490A 40 #define TEGRA234_DWC_IP_VER 0x562A 41 42 #define APPL_PINMUX 0x0 43 #define APPL_PINMUX_PEX_RST BIT(0) 44 #define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2) 45 #define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3) 46 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4) 47 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5) 48 49 #define APPL_CTRL 0x4 50 #define APPL_CTRL_SYS_PRE_DET_STATE BIT(6) 51 #define APPL_CTRL_LTSSM_EN BIT(7) 52 #define APPL_CTRL_HW_HOT_RST_EN BIT(20) 53 #define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0) 54 #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22 55 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1 56 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2 57 58 #define APPL_INTR_EN_L0_0 0x8 59 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) 60 #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) 61 #define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) 62 #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15) 63 #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) 64 #define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) 65 #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) 66 67 #define APPL_INTR_STATUS_L0 0xC 68 #define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) 69 #define APPL_INTR_STATUS_L0_INT_INT BIT(8) 70 #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15) 71 #define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16) 72 #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) 73 74 #define APPL_INTR_EN_L1_0_0 0x1C 75 #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) 76 #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3) 77 #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30) 78 79 #define APPL_INTR_STATUS_L1_0_0 0x20 80 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) 81 #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3) 82 #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30) 83 84 #define APPL_INTR_STATUS_L1_1 0x2C 85 #define APPL_INTR_STATUS_L1_2 0x30 86 #define APPL_INTR_STATUS_L1_3 0x34 87 #define APPL_INTR_STATUS_L1_6 0x3C 88 #define APPL_INTR_STATUS_L1_7 0x40 89 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1) 90 91 #define APPL_INTR_EN_L1_8_0 0x44 92 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) 93 #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3) 94 #define APPL_INTR_EN_L1_8_INTX_EN BIT(11) 95 #define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15) 96 97 #define APPL_INTR_STATUS_L1_8_0 0x4C 98 #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6) 99 #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2) 100 #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3) 101 102 #define APPL_INTR_STATUS_L1_9 0x54 103 #define APPL_INTR_STATUS_L1_10 0x58 104 #define APPL_INTR_STATUS_L1_11 0x64 105 #define APPL_INTR_STATUS_L1_13 0x74 106 #define APPL_INTR_STATUS_L1_14 0x78 107 #define APPL_INTR_STATUS_L1_15 0x7C 108 #define APPL_INTR_STATUS_L1_17 0x88 109 110 #define APPL_INTR_EN_L1_18 0x90 111 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2) 112 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) 113 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) 114 115 #define APPL_INTR_STATUS_L1_18 0x94 116 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2) 117 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) 118 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) 119 120 #define APPL_MSI_CTRL_1 0xAC 121 122 #define APPL_MSI_CTRL_2 0xB0 123 124 #define APPL_LEGACY_INTX 0xB8 125 126 #define APPL_LTR_MSG_1 0xC4 127 #define LTR_MSG_REQ BIT(15) 128 #define LTR_MST_NO_SNOOP_SHIFT 16 129 130 #define APPL_LTR_MSG_2 0xC8 131 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3) 132 133 #define APPL_LINK_STATUS 0xCC 134 #define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0) 135 136 #define APPL_DEBUG 0xD0 137 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21) 138 #define APPL_DEBUG_PM_LINKST_IN_L0 0x11 139 #define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3) 140 #define APPL_DEBUG_LTSSM_STATE_SHIFT 3 141 #define LTSSM_STATE_PRE_DETECT 5 142 143 #define APPL_RADM_STATUS 0xE4 144 #define APPL_PM_XMT_TURNOFF_STATE BIT(0) 145 146 #define APPL_DM_TYPE 0x100 147 #define APPL_DM_TYPE_MASK GENMASK(3, 0) 148 #define APPL_DM_TYPE_RP 0x4 149 #define APPL_DM_TYPE_EP 0x0 150 151 #define APPL_CFG_BASE_ADDR 0x104 152 #define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12) 153 154 #define APPL_CFG_IATU_DMA_BASE_ADDR 0x108 155 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18) 156 157 #define APPL_CFG_MISC 0x110 158 #define APPL_CFG_MISC_SLV_EP_MODE BIT(14) 159 #define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10) 160 #define APPL_CFG_MISC_ARCACHE_SHIFT 10 161 #define APPL_CFG_MISC_ARCACHE_VAL 3 162 163 #define APPL_CFG_SLCG_OVERRIDE 0x114 164 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0) 165 166 #define APPL_CAR_RESET_OVRD 0x12C 167 #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0) 168 169 #define IO_BASE_IO_DECODE BIT(0) 170 #define IO_BASE_IO_DECODE_BIT8 BIT(8) 171 172 #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0) 173 #define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16) 174 175 #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718 176 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19) 177 178 #define N_FTS_VAL 52 179 #define FTS_VAL 52 180 181 #define GEN3_EQ_CONTROL_OFF 0x8a8 182 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 183 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) 184 #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) 185 186 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 187 #define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 188 #define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) 189 #define AMBA_ERROR_RESPONSE_CRS_OKAY 0 190 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 191 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 192 193 #define MSIX_ADDR_MATCH_LOW_OFF 0x940 194 #define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0) 195 #define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2) 196 197 #define MSIX_ADDR_MATCH_HIGH_OFF 0x944 198 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0) 199 200 #define PORT_LOGIC_MSIX_DOORBELL 0x948 201 202 #define CAP_SPCIE_CAP_OFF 0x154 203 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0) 204 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8) 205 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8 206 207 #define PME_ACK_TIMEOUT 10000 208 209 #define LTSSM_TIMEOUT 50000 /* 50ms */ 210 211 #define GEN3_GEN4_EQ_PRESET_INIT 5 212 213 #define GEN1_CORE_CLK_FREQ 62500000 214 #define GEN2_CORE_CLK_FREQ 125000000 215 #define GEN3_CORE_CLK_FREQ 250000000 216 #define GEN4_CORE_CLK_FREQ 500000000 217 218 #define LTR_MSG_TIMEOUT (100 * 1000) 219 220 #define PERST_DEBOUNCE_TIME (5 * 1000) 221 222 #define EP_STATE_DISABLED 0 223 #define EP_STATE_ENABLED 1 224 225 static const unsigned int pcie_gen_freq[] = { 226 GEN1_CORE_CLK_FREQ, 227 GEN2_CORE_CLK_FREQ, 228 GEN3_CORE_CLK_FREQ, 229 GEN4_CORE_CLK_FREQ 230 }; 231 232 struct tegra_pcie_dw_of_data { 233 u32 version; 234 enum dw_pcie_device_mode mode; 235 bool has_msix_doorbell_access_fix; 236 bool has_sbr_reset_fix; 237 bool has_l1ss_exit_fix; 238 bool has_ltr_req_fix; 239 u32 cdm_chk_int_en_bit; 240 u32 gen4_preset_vec; 241 u8 n_fts[2]; 242 }; 243 244 struct tegra_pcie_dw { 245 struct device *dev; 246 struct resource *appl_res; 247 struct resource *dbi_res; 248 struct resource *atu_dma_res; 249 void __iomem *appl_base; 250 struct clk *core_clk; 251 struct reset_control *core_apb_rst; 252 struct reset_control *core_rst; 253 struct dw_pcie pci; 254 struct tegra_bpmp *bpmp; 255 256 struct tegra_pcie_dw_of_data *of_data; 257 258 bool supports_clkreq; 259 bool enable_cdm_check; 260 bool enable_srns; 261 bool link_state; 262 bool update_fc_fixup; 263 bool enable_ext_refclk; 264 u8 init_link_width; 265 u32 msi_ctrl_int; 266 u32 num_lanes; 267 u32 cid; 268 u32 cfg_link_cap_l1sub; 269 u32 ras_des_cap; 270 u32 pcie_cap_base; 271 u32 aspm_cmrt; 272 u32 aspm_pwr_on_t; 273 u32 aspm_l0s_enter_lat; 274 275 struct regulator *pex_ctl_supply; 276 struct regulator *slot_ctl_3v3; 277 struct regulator *slot_ctl_12v; 278 279 unsigned int phy_count; 280 struct phy **phys; 281 282 struct dentry *debugfs; 283 284 /* Endpoint mode specific */ 285 struct gpio_desc *pex_rst_gpiod; 286 struct gpio_desc *pex_refclk_sel_gpiod; 287 unsigned int pex_rst_irq; 288 int ep_state; 289 }; 290 291 static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) 292 { 293 return container_of(pci, struct tegra_pcie_dw, pci); 294 } 295 296 static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value, 297 const u32 reg) 298 { 299 writel_relaxed(value, pcie->appl_base + reg); 300 } 301 302 static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg) 303 { 304 return readl_relaxed(pcie->appl_base + reg); 305 } 306 307 struct tegra_pcie_soc { 308 enum dw_pcie_device_mode mode; 309 }; 310 311 static void apply_bad_link_workaround(struct dw_pcie_rp *pp) 312 { 313 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 314 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 315 u32 current_link_width; 316 u16 val; 317 318 /* 319 * NOTE:- Since this scenario is uncommon and link as such is not 320 * stable anyway, not waiting to confirm if link is really 321 * transitioning to Gen-2 speed 322 */ 323 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); 324 if (val & PCI_EXP_LNKSTA_LBMS) { 325 current_link_width = (val & PCI_EXP_LNKSTA_NLW) >> 326 PCI_EXP_LNKSTA_NLW_SHIFT; 327 if (pcie->init_link_width > current_link_width) { 328 dev_warn(pci->dev, "PCIe link is bad, width reduced\n"); 329 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 330 PCI_EXP_LNKCTL2); 331 val &= ~PCI_EXP_LNKCTL2_TLS; 332 val |= PCI_EXP_LNKCTL2_TLS_2_5GT; 333 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + 334 PCI_EXP_LNKCTL2, val); 335 336 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 337 PCI_EXP_LNKCTL); 338 val |= PCI_EXP_LNKCTL_RL; 339 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + 340 PCI_EXP_LNKCTL, val); 341 } 342 } 343 } 344 345 static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) 346 { 347 struct tegra_pcie_dw *pcie = arg; 348 struct dw_pcie *pci = &pcie->pci; 349 struct dw_pcie_rp *pp = &pci->pp; 350 u32 val, status_l0, status_l1; 351 u16 val_w; 352 353 status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); 354 if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { 355 status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); 356 appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); 357 if (!pcie->of_data->has_sbr_reset_fix && 358 status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { 359 /* SBR & Surprise Link Down WAR */ 360 val = appl_readl(pcie, APPL_CAR_RESET_OVRD); 361 val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; 362 appl_writel(pcie, val, APPL_CAR_RESET_OVRD); 363 udelay(1); 364 val = appl_readl(pcie, APPL_CAR_RESET_OVRD); 365 val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; 366 appl_writel(pcie, val, APPL_CAR_RESET_OVRD); 367 368 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 369 val |= PORT_LOGIC_SPEED_CHANGE; 370 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); 371 } 372 } 373 374 if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) { 375 status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); 376 if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { 377 appl_writel(pcie, 378 APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS, 379 APPL_INTR_STATUS_L1_8_0); 380 apply_bad_link_workaround(pp); 381 } 382 if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { 383 val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 384 PCI_EXP_LNKSTA); 385 val_w |= PCI_EXP_LNKSTA_LBMS; 386 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + 387 PCI_EXP_LNKSTA, val_w); 388 389 appl_writel(pcie, 390 APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS, 391 APPL_INTR_STATUS_L1_8_0); 392 393 val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 394 PCI_EXP_LNKSTA); 395 dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w & 396 PCI_EXP_LNKSTA_CLS); 397 } 398 } 399 400 if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { 401 status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18); 402 val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); 403 if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { 404 dev_info(pci->dev, "CDM check complete\n"); 405 val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; 406 } 407 if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { 408 dev_err(pci->dev, "CDM comparison mismatch\n"); 409 val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; 410 } 411 if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { 412 dev_err(pci->dev, "CDM Logic error\n"); 413 val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; 414 } 415 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); 416 val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); 417 dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val); 418 } 419 420 return IRQ_HANDLED; 421 } 422 423 static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) 424 { 425 u32 val; 426 427 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); 428 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); 429 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); 430 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); 431 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); 432 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); 433 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); 434 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); 435 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); 436 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); 437 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); 438 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); 439 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); 440 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); 441 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); 442 appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2); 443 444 val = appl_readl(pcie, APPL_CTRL); 445 val |= APPL_CTRL_LTSSM_EN; 446 appl_writel(pcie, val, APPL_CTRL); 447 } 448 449 static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) 450 { 451 struct tegra_pcie_dw *pcie = arg; 452 struct dw_pcie *pci = &pcie->pci; 453 u32 val, speed; 454 455 speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & 456 PCI_EXP_LNKSTA_CLS; 457 clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); 458 459 if (pcie->of_data->has_ltr_req_fix) 460 return IRQ_HANDLED; 461 462 /* If EP doesn't advertise L1SS, just return */ 463 val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); 464 if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) 465 return IRQ_HANDLED; 466 467 /* Check if BME is set to '1' */ 468 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 469 if (val & PCI_COMMAND_MASTER) { 470 ktime_t timeout; 471 472 /* 110us for both snoop and no-snoop */ 473 val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ; 474 val |= (val << LTR_MST_NO_SNOOP_SHIFT); 475 appl_writel(pcie, val, APPL_LTR_MSG_1); 476 477 /* Send LTR upstream */ 478 val = appl_readl(pcie, APPL_LTR_MSG_2); 479 val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; 480 appl_writel(pcie, val, APPL_LTR_MSG_2); 481 482 timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT); 483 for (;;) { 484 val = appl_readl(pcie, APPL_LTR_MSG_2); 485 if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)) 486 break; 487 if (ktime_after(ktime_get(), timeout)) 488 break; 489 usleep_range(1000, 1100); 490 } 491 if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE) 492 dev_err(pcie->dev, "Failed to send LTR message\n"); 493 } 494 495 return IRQ_HANDLED; 496 } 497 498 static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) 499 { 500 struct tegra_pcie_dw *pcie = arg; 501 struct dw_pcie_ep *ep = &pcie->pci.ep; 502 int spurious = 1; 503 u32 status_l0, status_l1, link_status; 504 505 status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); 506 if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { 507 status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); 508 appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); 509 510 if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE) 511 pex_ep_event_hot_rst_done(pcie); 512 513 if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) { 514 link_status = appl_readl(pcie, APPL_LINK_STATUS); 515 if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) { 516 dev_dbg(pcie->dev, "Link is up with Host\n"); 517 dw_pcie_ep_linkup(ep); 518 } 519 } 520 521 spurious = 0; 522 } 523 524 if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) { 525 status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15); 526 appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15); 527 528 if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED) 529 return IRQ_WAKE_THREAD; 530 531 spurious = 0; 532 } 533 534 if (spurious) { 535 dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n", 536 status_l0); 537 appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0); 538 } 539 540 return IRQ_HANDLED; 541 } 542 543 static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, 544 int size, u32 *val) 545 { 546 struct dw_pcie_rp *pp = bus->sysdata; 547 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 548 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 549 550 /* 551 * This is an endpoint mode specific register happen to appear even 552 * when controller is operating in root port mode and system hangs 553 * when it is accessed with link being in ASPM-L1 state. 554 * So skip accessing it altogether 555 */ 556 if (!pcie->of_data->has_msix_doorbell_access_fix && 557 !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) { 558 *val = 0x00000000; 559 return PCIBIOS_SUCCESSFUL; 560 } 561 562 return pci_generic_config_read(bus, devfn, where, size, val); 563 } 564 565 static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, 566 int size, u32 val) 567 { 568 struct dw_pcie_rp *pp = bus->sysdata; 569 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 570 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 571 572 /* 573 * This is an endpoint mode specific register happen to appear even 574 * when controller is operating in root port mode and system hangs 575 * when it is accessed with link being in ASPM-L1 state. 576 * So skip accessing it altogether 577 */ 578 if (!pcie->of_data->has_msix_doorbell_access_fix && 579 !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) 580 return PCIBIOS_SUCCESSFUL; 581 582 return pci_generic_config_write(bus, devfn, where, size, val); 583 } 584 585 static struct pci_ops tegra_pci_ops = { 586 .map_bus = dw_pcie_own_conf_map_bus, 587 .read = tegra_pcie_dw_rd_own_conf, 588 .write = tegra_pcie_dw_wr_own_conf, 589 }; 590 591 #if defined(CONFIG_PCIEASPM) 592 static void disable_aspm_l11(struct tegra_pcie_dw *pcie) 593 { 594 u32 val; 595 596 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); 597 val &= ~PCI_L1SS_CAP_ASPM_L1_1; 598 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); 599 } 600 601 static void disable_aspm_l12(struct tegra_pcie_dw *pcie) 602 { 603 u32 val; 604 605 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); 606 val &= ~PCI_L1SS_CAP_ASPM_L1_2; 607 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); 608 } 609 610 static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) 611 { 612 u32 val; 613 614 val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + 615 PCIE_RAS_DES_EVENT_COUNTER_CONTROL); 616 val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT); 617 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; 618 val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT; 619 val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; 620 dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + 621 PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); 622 val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + 623 PCIE_RAS_DES_EVENT_COUNTER_DATA); 624 625 return val; 626 } 627 628 static int aspm_state_cnt(struct seq_file *s, void *data) 629 { 630 struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *) 631 dev_get_drvdata(s->private); 632 u32 val; 633 634 seq_printf(s, "Tx L0s entry count : %u\n", 635 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S)); 636 637 seq_printf(s, "Rx L0s entry count : %u\n", 638 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S)); 639 640 seq_printf(s, "Link L1 entry count : %u\n", 641 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1)); 642 643 seq_printf(s, "Link L1.1 entry count : %u\n", 644 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1)); 645 646 seq_printf(s, "Link L1.2 entry count : %u\n", 647 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2)); 648 649 /* Clear all counters */ 650 dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + 651 PCIE_RAS_DES_EVENT_COUNTER_CONTROL, 652 EVENT_COUNTER_ALL_CLEAR); 653 654 /* Re-enable counting */ 655 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; 656 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; 657 dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + 658 PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); 659 660 return 0; 661 } 662 663 static void init_host_aspm(struct tegra_pcie_dw *pcie) 664 { 665 struct dw_pcie *pci = &pcie->pci; 666 u32 val; 667 668 val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); 669 pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; 670 671 pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci, 672 PCI_EXT_CAP_ID_VNDR); 673 674 /* Enable ASPM counters */ 675 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; 676 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; 677 dw_pcie_writel_dbi(pci, pcie->ras_des_cap + 678 PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); 679 680 /* Program T_cmrt and T_pwr_on values */ 681 val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); 682 val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE); 683 val |= (pcie->aspm_cmrt << 8); 684 val |= (pcie->aspm_pwr_on_t << 19); 685 dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val); 686 687 /* Program L0s and L1 entrance latencies */ 688 val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR); 689 val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK; 690 val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT); 691 val |= PORT_AFR_ENTER_ASPM; 692 dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val); 693 } 694 695 static void init_debugfs(struct tegra_pcie_dw *pcie) 696 { 697 debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs, 698 aspm_state_cnt); 699 } 700 #else 701 static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } 702 static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } 703 static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } 704 static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; } 705 #endif 706 707 static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp) 708 { 709 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 710 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 711 u32 val; 712 u16 val_w; 713 714 val = appl_readl(pcie, APPL_INTR_EN_L0_0); 715 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; 716 appl_writel(pcie, val, APPL_INTR_EN_L0_0); 717 718 if (!pcie->of_data->has_sbr_reset_fix) { 719 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); 720 val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; 721 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); 722 } 723 724 if (pcie->enable_cdm_check) { 725 val = appl_readl(pcie, APPL_INTR_EN_L0_0); 726 val |= pcie->of_data->cdm_chk_int_en_bit; 727 appl_writel(pcie, val, APPL_INTR_EN_L0_0); 728 729 val = appl_readl(pcie, APPL_INTR_EN_L1_18); 730 val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR; 731 val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR; 732 appl_writel(pcie, val, APPL_INTR_EN_L1_18); 733 } 734 735 val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + 736 PCI_EXP_LNKSTA); 737 pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >> 738 PCI_EXP_LNKSTA_NLW_SHIFT; 739 740 val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base + 741 PCI_EXP_LNKCTL); 742 val_w |= PCI_EXP_LNKCTL_LBMIE; 743 dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL, 744 val_w); 745 } 746 747 static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp) 748 { 749 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 750 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 751 u32 val; 752 753 /* Enable legacy interrupt generation */ 754 val = appl_readl(pcie, APPL_INTR_EN_L0_0); 755 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; 756 val |= APPL_INTR_EN_L0_0_INT_INT_EN; 757 appl_writel(pcie, val, APPL_INTR_EN_L0_0); 758 759 val = appl_readl(pcie, APPL_INTR_EN_L1_8_0); 760 val |= APPL_INTR_EN_L1_8_INTX_EN; 761 val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN; 762 val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN; 763 if (IS_ENABLED(CONFIG_PCIEAER)) 764 val |= APPL_INTR_EN_L1_8_AER_INT_EN; 765 appl_writel(pcie, val, APPL_INTR_EN_L1_8_0); 766 } 767 768 static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp) 769 { 770 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 771 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 772 u32 val; 773 774 /* Enable MSI interrupt generation */ 775 val = appl_readl(pcie, APPL_INTR_EN_L0_0); 776 val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN; 777 val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN; 778 appl_writel(pcie, val, APPL_INTR_EN_L0_0); 779 } 780 781 static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp) 782 { 783 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 784 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 785 786 /* Clear interrupt statuses before enabling interrupts */ 787 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); 788 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); 789 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); 790 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); 791 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); 792 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); 793 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); 794 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); 795 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); 796 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); 797 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); 798 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); 799 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); 800 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); 801 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); 802 803 tegra_pcie_enable_system_interrupts(pp); 804 tegra_pcie_enable_legacy_interrupts(pp); 805 if (IS_ENABLED(CONFIG_PCI_MSI)) 806 tegra_pcie_enable_msi_interrupts(pp); 807 } 808 809 static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) 810 { 811 struct dw_pcie *pci = &pcie->pci; 812 u32 val, offset, i; 813 814 /* Program init preset */ 815 for (i = 0; i < pcie->num_lanes; i++) { 816 val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2)); 817 val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK; 818 val |= GEN3_GEN4_EQ_PRESET_INIT; 819 val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK; 820 val |= (GEN3_GEN4_EQ_PRESET_INIT << 821 CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT); 822 dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val); 823 824 offset = dw_pcie_find_ext_capability(pci, 825 PCI_EXT_CAP_ID_PL_16GT) + 826 PCI_PL_16GT_LE_CTRL; 827 val = dw_pcie_readb_dbi(pci, offset + i); 828 val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK; 829 val |= GEN3_GEN4_EQ_PRESET_INIT; 830 val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK; 831 val |= (GEN3_GEN4_EQ_PRESET_INIT << 832 PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT); 833 dw_pcie_writeb_dbi(pci, offset + i, val); 834 } 835 836 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 837 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; 838 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 839 840 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); 841 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; 842 val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); 843 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; 844 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); 845 846 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 847 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; 848 val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT); 849 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 850 851 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); 852 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; 853 val |= (pcie->of_data->gen4_preset_vec << 854 GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); 855 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; 856 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); 857 858 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 859 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; 860 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 861 } 862 863 static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp) 864 { 865 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 866 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 867 u32 val; 868 u16 val_16; 869 870 pp->bridge->ops = &tegra_pci_ops; 871 872 if (!pcie->pcie_cap_base) 873 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, 874 PCI_CAP_ID_EXP); 875 876 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); 877 val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 878 val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; 879 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); 880 881 val = dw_pcie_readl_dbi(pci, PCI_IO_BASE); 882 val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8); 883 dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); 884 885 val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE); 886 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE; 887 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE; 888 dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val); 889 890 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); 891 892 /* Enable as 0xFFFF0001 response for CRS */ 893 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT); 894 val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); 895 val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 << 896 AMBA_ERROR_RESPONSE_CRS_SHIFT); 897 dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); 898 899 /* Configure Max lane width from DT */ 900 val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP); 901 val &= ~PCI_EXP_LNKCAP_MLW; 902 val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT); 903 dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); 904 905 /* Clear Slot Clock Configuration bit if SRNS configuration */ 906 if (pcie->enable_srns) { 907 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 908 PCI_EXP_LNKSTA); 909 val_16 &= ~PCI_EXP_LNKSTA_SLC; 910 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, 911 val_16); 912 } 913 914 config_gen3_gen4_eq_presets(pcie); 915 916 init_host_aspm(pcie); 917 918 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ 919 if (!pcie->supports_clkreq) { 920 disable_aspm_l11(pcie); 921 disable_aspm_l12(pcie); 922 } 923 924 if (!pcie->of_data->has_l1ss_exit_fix) { 925 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 926 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; 927 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 928 } 929 930 if (pcie->update_fc_fixup) { 931 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); 932 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; 933 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); 934 } 935 936 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); 937 938 return 0; 939 } 940 941 static int tegra_pcie_dw_start_link(struct dw_pcie *pci) 942 { 943 u32 val, offset, speed, tmp; 944 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 945 struct dw_pcie_rp *pp = &pci->pp; 946 bool retry = true; 947 948 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { 949 enable_irq(pcie->pex_rst_irq); 950 return 0; 951 } 952 953 retry_link: 954 /* Assert RST */ 955 val = appl_readl(pcie, APPL_PINMUX); 956 val &= ~APPL_PINMUX_PEX_RST; 957 appl_writel(pcie, val, APPL_PINMUX); 958 959 usleep_range(100, 200); 960 961 /* Enable LTSSM */ 962 val = appl_readl(pcie, APPL_CTRL); 963 val |= APPL_CTRL_LTSSM_EN; 964 appl_writel(pcie, val, APPL_CTRL); 965 966 /* De-assert RST */ 967 val = appl_readl(pcie, APPL_PINMUX); 968 val |= APPL_PINMUX_PEX_RST; 969 appl_writel(pcie, val, APPL_PINMUX); 970 971 msleep(100); 972 973 if (dw_pcie_wait_for_link(pci)) { 974 if (!retry) 975 return 0; 976 /* 977 * There are some endpoints which can't get the link up if 978 * root port has Data Link Feature (DLF) enabled. 979 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info 980 * on Scaled Flow Control and DLF. 981 * So, need to confirm that is indeed the case here and attempt 982 * link up once again with DLF disabled. 983 */ 984 val = appl_readl(pcie, APPL_DEBUG); 985 val &= APPL_DEBUG_LTSSM_STATE_MASK; 986 val >>= APPL_DEBUG_LTSSM_STATE_SHIFT; 987 tmp = appl_readl(pcie, APPL_LINK_STATUS); 988 tmp &= APPL_LINK_STATUS_RDLH_LINK_UP; 989 if (!(val == 0x11 && !tmp)) { 990 /* Link is down for all good reasons */ 991 return 0; 992 } 993 994 dev_info(pci->dev, "Link is down in DLL"); 995 dev_info(pci->dev, "Trying again with DLFE disabled\n"); 996 /* Disable LTSSM */ 997 val = appl_readl(pcie, APPL_CTRL); 998 val &= ~APPL_CTRL_LTSSM_EN; 999 appl_writel(pcie, val, APPL_CTRL); 1000 1001 reset_control_assert(pcie->core_rst); 1002 reset_control_deassert(pcie->core_rst); 1003 1004 offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF); 1005 val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP); 1006 val &= ~PCI_DLF_EXCHANGE_ENABLE; 1007 dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val); 1008 1009 tegra_pcie_dw_host_init(pp); 1010 dw_pcie_setup_rc(pp); 1011 1012 retry = false; 1013 goto retry_link; 1014 } 1015 1016 speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & 1017 PCI_EXP_LNKSTA_CLS; 1018 clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); 1019 1020 tegra_pcie_enable_interrupts(pp); 1021 1022 return 0; 1023 } 1024 1025 static int tegra_pcie_dw_link_up(struct dw_pcie *pci) 1026 { 1027 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 1028 u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); 1029 1030 return !!(val & PCI_EXP_LNKSTA_DLLLA); 1031 } 1032 1033 static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) 1034 { 1035 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 1036 1037 disable_irq(pcie->pex_rst_irq); 1038 } 1039 1040 static const struct dw_pcie_ops tegra_dw_pcie_ops = { 1041 .link_up = tegra_pcie_dw_link_up, 1042 .start_link = tegra_pcie_dw_start_link, 1043 .stop_link = tegra_pcie_dw_stop_link, 1044 }; 1045 1046 static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { 1047 .host_init = tegra_pcie_dw_host_init, 1048 }; 1049 1050 static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) 1051 { 1052 unsigned int phy_count = pcie->phy_count; 1053 1054 while (phy_count--) { 1055 phy_power_off(pcie->phys[phy_count]); 1056 phy_exit(pcie->phys[phy_count]); 1057 } 1058 } 1059 1060 static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie) 1061 { 1062 unsigned int i; 1063 int ret; 1064 1065 for (i = 0; i < pcie->phy_count; i++) { 1066 ret = phy_init(pcie->phys[i]); 1067 if (ret < 0) 1068 goto phy_power_off; 1069 1070 ret = phy_power_on(pcie->phys[i]); 1071 if (ret < 0) 1072 goto phy_exit; 1073 } 1074 1075 return 0; 1076 1077 phy_power_off: 1078 while (i--) { 1079 phy_power_off(pcie->phys[i]); 1080 phy_exit: 1081 phy_exit(pcie->phys[i]); 1082 } 1083 1084 return ret; 1085 } 1086 1087 static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) 1088 { 1089 struct platform_device *pdev = to_platform_device(pcie->dev); 1090 struct device_node *np = pcie->dev->of_node; 1091 int ret; 1092 1093 pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 1094 if (!pcie->dbi_res) { 1095 dev_err(pcie->dev, "Failed to find \"dbi\" region\n"); 1096 return -ENODEV; 1097 } 1098 1099 ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt); 1100 if (ret < 0) { 1101 dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret); 1102 return ret; 1103 } 1104 1105 ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us", 1106 &pcie->aspm_pwr_on_t); 1107 if (ret < 0) 1108 dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n", 1109 ret); 1110 1111 ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us", 1112 &pcie->aspm_l0s_enter_lat); 1113 if (ret < 0) 1114 dev_info(pcie->dev, 1115 "Failed to read ASPM L0s Entrance latency: %d\n", ret); 1116 1117 ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes); 1118 if (ret < 0) { 1119 dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret); 1120 return ret; 1121 } 1122 1123 ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid); 1124 if (ret) { 1125 dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); 1126 return ret; 1127 } 1128 1129 ret = of_property_count_strings(np, "phy-names"); 1130 if (ret < 0) { 1131 dev_err(pcie->dev, "Failed to find PHY entries: %d\n", 1132 ret); 1133 return ret; 1134 } 1135 pcie->phy_count = ret; 1136 1137 if (of_property_read_bool(np, "nvidia,update-fc-fixup")) 1138 pcie->update_fc_fixup = true; 1139 1140 /* RP using an external REFCLK is supported only in Tegra234 */ 1141 if (pcie->of_data->version == TEGRA194_DWC_IP_VER) { 1142 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) 1143 pcie->enable_ext_refclk = true; 1144 } else { 1145 pcie->enable_ext_refclk = 1146 of_property_read_bool(pcie->dev->of_node, 1147 "nvidia,enable-ext-refclk"); 1148 } 1149 1150 pcie->supports_clkreq = 1151 of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); 1152 1153 pcie->enable_cdm_check = 1154 of_property_read_bool(np, "snps,enable-cdm-check"); 1155 1156 if (pcie->of_data->version == TEGRA234_DWC_IP_VER) 1157 pcie->enable_srns = 1158 of_property_read_bool(np, "nvidia,enable-srns"); 1159 1160 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) 1161 return 0; 1162 1163 /* Endpoint mode specific DT entries */ 1164 pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN); 1165 if (IS_ERR(pcie->pex_rst_gpiod)) { 1166 int err = PTR_ERR(pcie->pex_rst_gpiod); 1167 const char *level = KERN_ERR; 1168 1169 if (err == -EPROBE_DEFER) 1170 level = KERN_DEBUG; 1171 1172 dev_printk(level, pcie->dev, 1173 dev_fmt("Failed to get PERST GPIO: %d\n"), 1174 err); 1175 return err; 1176 } 1177 1178 pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev, 1179 "nvidia,refclk-select", 1180 GPIOD_OUT_HIGH); 1181 if (IS_ERR(pcie->pex_refclk_sel_gpiod)) { 1182 int err = PTR_ERR(pcie->pex_refclk_sel_gpiod); 1183 const char *level = KERN_ERR; 1184 1185 if (err == -EPROBE_DEFER) 1186 level = KERN_DEBUG; 1187 1188 dev_printk(level, pcie->dev, 1189 dev_fmt("Failed to get REFCLK select GPIOs: %d\n"), 1190 err); 1191 pcie->pex_refclk_sel_gpiod = NULL; 1192 } 1193 1194 return 0; 1195 } 1196 1197 static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, 1198 bool enable) 1199 { 1200 struct mrq_uphy_response resp; 1201 struct tegra_bpmp_message msg; 1202 struct mrq_uphy_request req; 1203 1204 /* 1205 * Controller-5 doesn't need to have its state set by BPMP-FW in 1206 * Tegra194 1207 */ 1208 if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5) 1209 return 0; 1210 1211 memset(&req, 0, sizeof(req)); 1212 memset(&resp, 0, sizeof(resp)); 1213 1214 req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; 1215 req.controller_state.pcie_controller = pcie->cid; 1216 req.controller_state.enable = enable; 1217 1218 memset(&msg, 0, sizeof(msg)); 1219 msg.mrq = MRQ_UPHY; 1220 msg.tx.data = &req; 1221 msg.tx.size = sizeof(req); 1222 msg.rx.data = &resp; 1223 msg.rx.size = sizeof(resp); 1224 1225 return tegra_bpmp_transfer(pcie->bpmp, &msg); 1226 } 1227 1228 static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, 1229 bool enable) 1230 { 1231 struct mrq_uphy_response resp; 1232 struct tegra_bpmp_message msg; 1233 struct mrq_uphy_request req; 1234 1235 memset(&req, 0, sizeof(req)); 1236 memset(&resp, 0, sizeof(resp)); 1237 1238 if (enable) { 1239 req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT; 1240 req.ep_ctrlr_pll_init.ep_controller = pcie->cid; 1241 } else { 1242 req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF; 1243 req.ep_ctrlr_pll_off.ep_controller = pcie->cid; 1244 } 1245 1246 memset(&msg, 0, sizeof(msg)); 1247 msg.mrq = MRQ_UPHY; 1248 msg.tx.data = &req; 1249 msg.tx.size = sizeof(req); 1250 msg.rx.data = &resp; 1251 msg.rx.size = sizeof(resp); 1252 1253 return tegra_bpmp_transfer(pcie->bpmp, &msg); 1254 } 1255 1256 static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) 1257 { 1258 struct dw_pcie_rp *pp = &pcie->pci.pp; 1259 struct pci_bus *child, *root_bus = NULL; 1260 struct pci_dev *pdev; 1261 1262 /* 1263 * link doesn't go into L2 state with some of the endpoints with Tegra 1264 * if they are not in D0 state. So, need to make sure that immediate 1265 * downstream devices are in D0 state before sending PME_TurnOff to put 1266 * link into L2 state. 1267 * This is as per PCI Express Base r4.0 v1.0 September 27-2017, 1268 * 5.2 Link State Power Management (Page #428). 1269 */ 1270 1271 list_for_each_entry(child, &pp->bridge->bus->children, node) { 1272 /* Bring downstream devices to D0 if they are not already in */ 1273 if (child->parent == pp->bridge->bus) { 1274 root_bus = child; 1275 break; 1276 } 1277 } 1278 1279 if (!root_bus) { 1280 dev_err(pcie->dev, "Failed to find downstream devices\n"); 1281 return; 1282 } 1283 1284 list_for_each_entry(pdev, &root_bus->devices, bus_list) { 1285 if (PCI_SLOT(pdev->devfn) == 0) { 1286 if (pci_set_power_state(pdev, PCI_D0)) 1287 dev_err(pcie->dev, 1288 "Failed to transition %s to D0 state\n", 1289 dev_name(&pdev->dev)); 1290 } 1291 } 1292 } 1293 1294 static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) 1295 { 1296 pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); 1297 if (IS_ERR(pcie->slot_ctl_3v3)) { 1298 if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV) 1299 return PTR_ERR(pcie->slot_ctl_3v3); 1300 1301 pcie->slot_ctl_3v3 = NULL; 1302 } 1303 1304 pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v"); 1305 if (IS_ERR(pcie->slot_ctl_12v)) { 1306 if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV) 1307 return PTR_ERR(pcie->slot_ctl_12v); 1308 1309 pcie->slot_ctl_12v = NULL; 1310 } 1311 1312 return 0; 1313 } 1314 1315 static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) 1316 { 1317 int ret; 1318 1319 if (pcie->slot_ctl_3v3) { 1320 ret = regulator_enable(pcie->slot_ctl_3v3); 1321 if (ret < 0) { 1322 dev_err(pcie->dev, 1323 "Failed to enable 3.3V slot supply: %d\n", ret); 1324 return ret; 1325 } 1326 } 1327 1328 if (pcie->slot_ctl_12v) { 1329 ret = regulator_enable(pcie->slot_ctl_12v); 1330 if (ret < 0) { 1331 dev_err(pcie->dev, 1332 "Failed to enable 12V slot supply: %d\n", ret); 1333 goto fail_12v_enable; 1334 } 1335 } 1336 1337 /* 1338 * According to PCI Express Card Electromechanical Specification 1339 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive) 1340 * should be a minimum of 100ms. 1341 */ 1342 if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v) 1343 msleep(100); 1344 1345 return 0; 1346 1347 fail_12v_enable: 1348 if (pcie->slot_ctl_3v3) 1349 regulator_disable(pcie->slot_ctl_3v3); 1350 return ret; 1351 } 1352 1353 static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) 1354 { 1355 if (pcie->slot_ctl_12v) 1356 regulator_disable(pcie->slot_ctl_12v); 1357 if (pcie->slot_ctl_3v3) 1358 regulator_disable(pcie->slot_ctl_3v3); 1359 } 1360 1361 static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, 1362 bool en_hw_hot_rst) 1363 { 1364 int ret; 1365 u32 val; 1366 1367 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); 1368 if (ret) { 1369 dev_err(pcie->dev, 1370 "Failed to enable controller %u: %d\n", pcie->cid, ret); 1371 return ret; 1372 } 1373 1374 if (pcie->enable_ext_refclk) { 1375 ret = tegra_pcie_bpmp_set_pll_state(pcie, true); 1376 if (ret) { 1377 dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret); 1378 goto fail_pll_init; 1379 } 1380 } 1381 1382 ret = tegra_pcie_enable_slot_regulators(pcie); 1383 if (ret < 0) 1384 goto fail_slot_reg_en; 1385 1386 ret = regulator_enable(pcie->pex_ctl_supply); 1387 if (ret < 0) { 1388 dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret); 1389 goto fail_reg_en; 1390 } 1391 1392 ret = clk_prepare_enable(pcie->core_clk); 1393 if (ret) { 1394 dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret); 1395 goto fail_core_clk; 1396 } 1397 1398 ret = reset_control_deassert(pcie->core_apb_rst); 1399 if (ret) { 1400 dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n", 1401 ret); 1402 goto fail_core_apb_rst; 1403 } 1404 1405 if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) { 1406 /* Enable HW_HOT_RST mode */ 1407 val = appl_readl(pcie, APPL_CTRL); 1408 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << 1409 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 1410 val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << 1411 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 1412 val |= APPL_CTRL_HW_HOT_RST_EN; 1413 appl_writel(pcie, val, APPL_CTRL); 1414 } 1415 1416 ret = tegra_pcie_enable_phy(pcie); 1417 if (ret) { 1418 dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret); 1419 goto fail_phy; 1420 } 1421 1422 /* Update CFG base address */ 1423 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, 1424 APPL_CFG_BASE_ADDR); 1425 1426 /* Configure this core for RP mode operation */ 1427 appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE); 1428 1429 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); 1430 1431 val = appl_readl(pcie, APPL_CTRL); 1432 appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL); 1433 1434 val = appl_readl(pcie, APPL_CFG_MISC); 1435 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); 1436 appl_writel(pcie, val, APPL_CFG_MISC); 1437 1438 if (pcie->enable_srns || pcie->enable_ext_refclk) { 1439 /* 1440 * When Tegra PCIe RP is using external clock, it cannot supply 1441 * same clock to its downstream hierarchy. Hence, gate PCIe RP 1442 * REFCLK out pads when RP & EP are using separate clocks or RP 1443 * is using an external REFCLK. 1444 */ 1445 val = appl_readl(pcie, APPL_PINMUX); 1446 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; 1447 val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; 1448 appl_writel(pcie, val, APPL_PINMUX); 1449 } 1450 1451 if (!pcie->supports_clkreq) { 1452 val = appl_readl(pcie, APPL_PINMUX); 1453 val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN; 1454 val &= ~APPL_PINMUX_CLKREQ_OVERRIDE; 1455 appl_writel(pcie, val, APPL_PINMUX); 1456 } 1457 1458 /* Update iATU_DMA base address */ 1459 appl_writel(pcie, 1460 pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, 1461 APPL_CFG_IATU_DMA_BASE_ADDR); 1462 1463 reset_control_deassert(pcie->core_rst); 1464 1465 return ret; 1466 1467 fail_phy: 1468 reset_control_assert(pcie->core_apb_rst); 1469 fail_core_apb_rst: 1470 clk_disable_unprepare(pcie->core_clk); 1471 fail_core_clk: 1472 regulator_disable(pcie->pex_ctl_supply); 1473 fail_reg_en: 1474 tegra_pcie_disable_slot_regulators(pcie); 1475 fail_slot_reg_en: 1476 if (pcie->enable_ext_refclk) 1477 tegra_pcie_bpmp_set_pll_state(pcie, false); 1478 fail_pll_init: 1479 tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1480 1481 return ret; 1482 } 1483 1484 static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie) 1485 { 1486 int ret; 1487 1488 ret = reset_control_assert(pcie->core_rst); 1489 if (ret) 1490 dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret); 1491 1492 tegra_pcie_disable_phy(pcie); 1493 1494 ret = reset_control_assert(pcie->core_apb_rst); 1495 if (ret) 1496 dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret); 1497 1498 clk_disable_unprepare(pcie->core_clk); 1499 1500 ret = regulator_disable(pcie->pex_ctl_supply); 1501 if (ret) 1502 dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret); 1503 1504 tegra_pcie_disable_slot_regulators(pcie); 1505 1506 if (pcie->enable_ext_refclk) { 1507 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1508 if (ret) 1509 dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret); 1510 } 1511 1512 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1513 if (ret) 1514 dev_err(pcie->dev, "Failed to disable controller %d: %d\n", 1515 pcie->cid, ret); 1516 } 1517 1518 static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) 1519 { 1520 struct dw_pcie *pci = &pcie->pci; 1521 struct dw_pcie_rp *pp = &pci->pp; 1522 int ret; 1523 1524 ret = tegra_pcie_config_controller(pcie, false); 1525 if (ret < 0) 1526 return ret; 1527 1528 pp->ops = &tegra_pcie_dw_host_ops; 1529 1530 ret = dw_pcie_host_init(pp); 1531 if (ret < 0) { 1532 dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret); 1533 goto fail_host_init; 1534 } 1535 1536 return 0; 1537 1538 fail_host_init: 1539 tegra_pcie_unconfig_controller(pcie); 1540 return ret; 1541 } 1542 1543 static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) 1544 { 1545 u32 val; 1546 1547 if (!tegra_pcie_dw_link_up(&pcie->pci)) 1548 return 0; 1549 1550 val = appl_readl(pcie, APPL_RADM_STATUS); 1551 val |= APPL_PM_XMT_TURNOFF_STATE; 1552 appl_writel(pcie, val, APPL_RADM_STATUS); 1553 1554 return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val, 1555 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT, 1556 1, PME_ACK_TIMEOUT); 1557 } 1558 1559 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) 1560 { 1561 u32 data; 1562 int err; 1563 1564 if (!tegra_pcie_dw_link_up(&pcie->pci)) { 1565 dev_dbg(pcie->dev, "PCIe link is not up...!\n"); 1566 return; 1567 } 1568 1569 /* 1570 * PCIe controller exits from L2 only if reset is applied, so 1571 * controller doesn't handle interrupts. But in cases where 1572 * L2 entry fails, PERST# is asserted which can trigger surprise 1573 * link down AER. However this function call happens in 1574 * suspend_noirq(), so AER interrupt will not be processed. 1575 * Disable all interrupts to avoid such a scenario. 1576 */ 1577 appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0); 1578 1579 if (tegra_pcie_try_link_l2(pcie)) { 1580 dev_info(pcie->dev, "Link didn't transition to L2 state\n"); 1581 /* 1582 * TX lane clock freq will reset to Gen1 only if link is in L2 1583 * or detect state. 1584 * So apply pex_rst to end point to force RP to go into detect 1585 * state 1586 */ 1587 data = appl_readl(pcie, APPL_PINMUX); 1588 data &= ~APPL_PINMUX_PEX_RST; 1589 appl_writel(pcie, data, APPL_PINMUX); 1590 1591 /* 1592 * Some cards do not go to detect state even after de-asserting 1593 * PERST#. So, de-assert LTSSM to bring link to detect state. 1594 */ 1595 data = readl(pcie->appl_base + APPL_CTRL); 1596 data &= ~APPL_CTRL_LTSSM_EN; 1597 writel(data, pcie->appl_base + APPL_CTRL); 1598 1599 err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, 1600 data, 1601 ((data & 1602 APPL_DEBUG_LTSSM_STATE_MASK) >> 1603 APPL_DEBUG_LTSSM_STATE_SHIFT) == 1604 LTSSM_STATE_PRE_DETECT, 1605 1, LTSSM_TIMEOUT); 1606 if (err) 1607 dev_info(pcie->dev, "Link didn't go to detect state\n"); 1608 } 1609 /* 1610 * DBI registers may not be accessible after this as PLL-E would be 1611 * down depending on how CLKREQ is pulled by end point 1612 */ 1613 data = appl_readl(pcie, APPL_PINMUX); 1614 data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE); 1615 /* Cut REFCLK to slot */ 1616 data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; 1617 data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; 1618 appl_writel(pcie, data, APPL_PINMUX); 1619 } 1620 1621 static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) 1622 { 1623 tegra_pcie_downstream_dev_to_D0(pcie); 1624 dw_pcie_host_deinit(&pcie->pci.pp); 1625 tegra_pcie_dw_pme_turnoff(pcie); 1626 tegra_pcie_unconfig_controller(pcie); 1627 } 1628 1629 static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) 1630 { 1631 struct device *dev = pcie->dev; 1632 char *name; 1633 int ret; 1634 1635 pm_runtime_enable(dev); 1636 1637 ret = pm_runtime_get_sync(dev); 1638 if (ret < 0) { 1639 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", 1640 ret); 1641 goto fail_pm_get_sync; 1642 } 1643 1644 ret = pinctrl_pm_select_default_state(dev); 1645 if (ret < 0) { 1646 dev_err(dev, "Failed to configure sideband pins: %d\n", ret); 1647 goto fail_pm_get_sync; 1648 } 1649 1650 ret = tegra_pcie_init_controller(pcie); 1651 if (ret < 0) { 1652 dev_err(dev, "Failed to initialize controller: %d\n", ret); 1653 goto fail_pm_get_sync; 1654 } 1655 1656 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); 1657 if (!pcie->link_state) { 1658 ret = -ENOMEDIUM; 1659 goto fail_host_init; 1660 } 1661 1662 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); 1663 if (!name) { 1664 ret = -ENOMEM; 1665 goto fail_host_init; 1666 } 1667 1668 pcie->debugfs = debugfs_create_dir(name, NULL); 1669 init_debugfs(pcie); 1670 1671 return ret; 1672 1673 fail_host_init: 1674 tegra_pcie_deinit_controller(pcie); 1675 fail_pm_get_sync: 1676 pm_runtime_put_sync(dev); 1677 pm_runtime_disable(dev); 1678 return ret; 1679 } 1680 1681 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) 1682 { 1683 u32 val; 1684 int ret; 1685 1686 if (pcie->ep_state == EP_STATE_DISABLED) 1687 return; 1688 1689 /* Disable LTSSM */ 1690 val = appl_readl(pcie, APPL_CTRL); 1691 val &= ~APPL_CTRL_LTSSM_EN; 1692 appl_writel(pcie, val, APPL_CTRL); 1693 1694 ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, 1695 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> 1696 APPL_DEBUG_LTSSM_STATE_SHIFT) == 1697 LTSSM_STATE_PRE_DETECT, 1698 1, LTSSM_TIMEOUT); 1699 if (ret) 1700 dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); 1701 1702 reset_control_assert(pcie->core_rst); 1703 1704 tegra_pcie_disable_phy(pcie); 1705 1706 reset_control_assert(pcie->core_apb_rst); 1707 1708 clk_disable_unprepare(pcie->core_clk); 1709 1710 pm_runtime_put_sync(pcie->dev); 1711 1712 if (pcie->enable_ext_refclk) { 1713 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1714 if (ret) 1715 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", 1716 ret); 1717 } 1718 1719 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1720 if (ret) 1721 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); 1722 1723 pcie->ep_state = EP_STATE_DISABLED; 1724 dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); 1725 } 1726 1727 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) 1728 { 1729 struct dw_pcie *pci = &pcie->pci; 1730 struct dw_pcie_ep *ep = &pci->ep; 1731 struct device *dev = pcie->dev; 1732 u32 val; 1733 int ret; 1734 u16 val_16; 1735 1736 if (pcie->ep_state == EP_STATE_ENABLED) 1737 return; 1738 1739 ret = pm_runtime_resume_and_get(dev); 1740 if (ret < 0) { 1741 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", 1742 ret); 1743 return; 1744 } 1745 1746 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); 1747 if (ret) { 1748 dev_err(pcie->dev, "Failed to enable controller %u: %d\n", 1749 pcie->cid, ret); 1750 goto fail_set_ctrl_state; 1751 } 1752 1753 if (pcie->enable_ext_refclk) { 1754 ret = tegra_pcie_bpmp_set_pll_state(pcie, true); 1755 if (ret) { 1756 dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", 1757 ret); 1758 goto fail_pll_init; 1759 } 1760 } 1761 1762 ret = clk_prepare_enable(pcie->core_clk); 1763 if (ret) { 1764 dev_err(dev, "Failed to enable core clock: %d\n", ret); 1765 goto fail_core_clk_enable; 1766 } 1767 1768 ret = reset_control_deassert(pcie->core_apb_rst); 1769 if (ret) { 1770 dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); 1771 goto fail_core_apb_rst; 1772 } 1773 1774 ret = tegra_pcie_enable_phy(pcie); 1775 if (ret) { 1776 dev_err(dev, "Failed to enable PHY: %d\n", ret); 1777 goto fail_phy; 1778 } 1779 1780 /* Clear any stale interrupt statuses */ 1781 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); 1782 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); 1783 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); 1784 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); 1785 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); 1786 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); 1787 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); 1788 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); 1789 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); 1790 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); 1791 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); 1792 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); 1793 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); 1794 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); 1795 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); 1796 1797 /* configure this core for EP mode operation */ 1798 val = appl_readl(pcie, APPL_DM_TYPE); 1799 val &= ~APPL_DM_TYPE_MASK; 1800 val |= APPL_DM_TYPE_EP; 1801 appl_writel(pcie, val, APPL_DM_TYPE); 1802 1803 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); 1804 1805 val = appl_readl(pcie, APPL_CTRL); 1806 val |= APPL_CTRL_SYS_PRE_DET_STATE; 1807 val |= APPL_CTRL_HW_HOT_RST_EN; 1808 appl_writel(pcie, val, APPL_CTRL); 1809 1810 val = appl_readl(pcie, APPL_CFG_MISC); 1811 val |= APPL_CFG_MISC_SLV_EP_MODE; 1812 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); 1813 appl_writel(pcie, val, APPL_CFG_MISC); 1814 1815 val = appl_readl(pcie, APPL_PINMUX); 1816 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; 1817 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; 1818 appl_writel(pcie, val, APPL_PINMUX); 1819 1820 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, 1821 APPL_CFG_BASE_ADDR); 1822 1823 appl_writel(pcie, pcie->atu_dma_res->start & 1824 APPL_CFG_IATU_DMA_BASE_ADDR_MASK, 1825 APPL_CFG_IATU_DMA_BASE_ADDR); 1826 1827 val = appl_readl(pcie, APPL_INTR_EN_L0_0); 1828 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; 1829 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; 1830 val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN; 1831 appl_writel(pcie, val, APPL_INTR_EN_L0_0); 1832 1833 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); 1834 val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN; 1835 val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN; 1836 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); 1837 1838 reset_control_deassert(pcie->core_rst); 1839 1840 if (pcie->update_fc_fixup) { 1841 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); 1842 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; 1843 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); 1844 } 1845 1846 config_gen3_gen4_eq_presets(pcie); 1847 1848 init_host_aspm(pcie); 1849 1850 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ 1851 if (!pcie->supports_clkreq) { 1852 disable_aspm_l11(pcie); 1853 disable_aspm_l12(pcie); 1854 } 1855 1856 if (!pcie->of_data->has_l1ss_exit_fix) { 1857 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 1858 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; 1859 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 1860 } 1861 1862 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, 1863 PCI_CAP_ID_EXP); 1864 1865 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); 1866 val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 1867 val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; 1868 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); 1869 1870 /* Clear Slot Clock Configuration bit if SRNS configuration */ 1871 if (pcie->enable_srns) { 1872 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 1873 PCI_EXP_LNKSTA); 1874 val_16 &= ~PCI_EXP_LNKSTA_SLC; 1875 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, 1876 val_16); 1877 } 1878 1879 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); 1880 1881 val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); 1882 val |= MSIX_ADDR_MATCH_LOW_OFF_EN; 1883 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val); 1884 val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK); 1885 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val); 1886 1887 ret = dw_pcie_ep_init_complete(ep); 1888 if (ret) { 1889 dev_err(dev, "Failed to complete initialization: %d\n", ret); 1890 goto fail_init_complete; 1891 } 1892 1893 dw_pcie_ep_init_notify(ep); 1894 1895 /* Program the private control to allow sending LTR upstream */ 1896 if (pcie->of_data->has_ltr_req_fix) { 1897 val = appl_readl(pcie, APPL_LTR_MSG_2); 1898 val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; 1899 appl_writel(pcie, val, APPL_LTR_MSG_2); 1900 } 1901 1902 /* Enable LTSSM */ 1903 val = appl_readl(pcie, APPL_CTRL); 1904 val |= APPL_CTRL_LTSSM_EN; 1905 appl_writel(pcie, val, APPL_CTRL); 1906 1907 pcie->ep_state = EP_STATE_ENABLED; 1908 dev_dbg(dev, "Initialization of endpoint is completed\n"); 1909 1910 return; 1911 1912 fail_init_complete: 1913 reset_control_assert(pcie->core_rst); 1914 tegra_pcie_disable_phy(pcie); 1915 fail_phy: 1916 reset_control_assert(pcie->core_apb_rst); 1917 fail_core_apb_rst: 1918 clk_disable_unprepare(pcie->core_clk); 1919 fail_core_clk_enable: 1920 tegra_pcie_bpmp_set_pll_state(pcie, false); 1921 fail_pll_init: 1922 tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1923 fail_set_ctrl_state: 1924 pm_runtime_put_sync(dev); 1925 } 1926 1927 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) 1928 { 1929 struct tegra_pcie_dw *pcie = arg; 1930 1931 if (gpiod_get_value(pcie->pex_rst_gpiod)) 1932 pex_ep_event_pex_rst_assert(pcie); 1933 else 1934 pex_ep_event_pex_rst_deassert(pcie); 1935 1936 return IRQ_HANDLED; 1937 } 1938 1939 static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) 1940 { 1941 /* Tegra194 supports only INTA */ 1942 if (irq > 1) 1943 return -EINVAL; 1944 1945 appl_writel(pcie, 1, APPL_LEGACY_INTX); 1946 usleep_range(1000, 2000); 1947 appl_writel(pcie, 0, APPL_LEGACY_INTX); 1948 return 0; 1949 } 1950 1951 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) 1952 { 1953 if (unlikely(irq > 31)) 1954 return -EINVAL; 1955 1956 appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1); 1957 1958 return 0; 1959 } 1960 1961 static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) 1962 { 1963 struct dw_pcie_ep *ep = &pcie->pci.ep; 1964 1965 writel(irq, ep->msi_mem); 1966 1967 return 0; 1968 } 1969 1970 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 1971 enum pci_epc_irq_type type, 1972 u16 interrupt_num) 1973 { 1974 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1975 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 1976 1977 switch (type) { 1978 case PCI_EPC_IRQ_LEGACY: 1979 return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); 1980 1981 case PCI_EPC_IRQ_MSI: 1982 return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); 1983 1984 case PCI_EPC_IRQ_MSIX: 1985 return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); 1986 1987 default: 1988 dev_err(pci->dev, "Unknown IRQ type\n"); 1989 return -EPERM; 1990 } 1991 1992 return 0; 1993 } 1994 1995 static const struct pci_epc_features tegra_pcie_epc_features = { 1996 .linkup_notifier = true, 1997 .core_init_notifier = true, 1998 .msi_capable = false, 1999 .msix_capable = false, 2000 .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5, 2001 .bar_fixed_64bit = 1 << BAR_0, 2002 .bar_fixed_size[0] = SZ_1M, 2003 }; 2004 2005 static const struct pci_epc_features* 2006 tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) 2007 { 2008 return &tegra_pcie_epc_features; 2009 } 2010 2011 static const struct dw_pcie_ep_ops pcie_ep_ops = { 2012 .raise_irq = tegra_pcie_ep_raise_irq, 2013 .get_features = tegra_pcie_ep_get_features, 2014 }; 2015 2016 static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, 2017 struct platform_device *pdev) 2018 { 2019 struct dw_pcie *pci = &pcie->pci; 2020 struct device *dev = pcie->dev; 2021 struct dw_pcie_ep *ep; 2022 char *name; 2023 int ret; 2024 2025 ep = &pci->ep; 2026 ep->ops = &pcie_ep_ops; 2027 2028 ep->page_size = SZ_64K; 2029 2030 ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME); 2031 if (ret < 0) { 2032 dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n", 2033 ret); 2034 return ret; 2035 } 2036 2037 ret = gpiod_to_irq(pcie->pex_rst_gpiod); 2038 if (ret < 0) { 2039 dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret); 2040 return ret; 2041 } 2042 pcie->pex_rst_irq = (unsigned int)ret; 2043 2044 name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq", 2045 pcie->cid); 2046 if (!name) { 2047 dev_err(dev, "Failed to create PERST IRQ string\n"); 2048 return -ENOMEM; 2049 } 2050 2051 irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN); 2052 2053 pcie->ep_state = EP_STATE_DISABLED; 2054 2055 ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL, 2056 tegra_pcie_ep_pex_rst_irq, 2057 IRQF_TRIGGER_RISING | 2058 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2059 name, (void *)pcie); 2060 if (ret < 0) { 2061 dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret); 2062 return ret; 2063 } 2064 2065 pm_runtime_enable(dev); 2066 2067 ret = dw_pcie_ep_init(ep); 2068 if (ret) { 2069 dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", 2070 ret); 2071 pm_runtime_disable(dev); 2072 return ret; 2073 } 2074 2075 return 0; 2076 } 2077 2078 static int tegra_pcie_dw_probe(struct platform_device *pdev) 2079 { 2080 const struct tegra_pcie_dw_of_data *data; 2081 struct device *dev = &pdev->dev; 2082 struct resource *atu_dma_res; 2083 struct tegra_pcie_dw *pcie; 2084 struct dw_pcie_rp *pp; 2085 struct dw_pcie *pci; 2086 struct phy **phys; 2087 char *name; 2088 int ret; 2089 u32 i; 2090 2091 data = of_device_get_match_data(dev); 2092 2093 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 2094 if (!pcie) 2095 return -ENOMEM; 2096 2097 pci = &pcie->pci; 2098 pci->dev = &pdev->dev; 2099 pci->ops = &tegra_dw_pcie_ops; 2100 pcie->dev = &pdev->dev; 2101 pcie->of_data = (struct tegra_pcie_dw_of_data *)data; 2102 pci->n_fts[0] = pcie->of_data->n_fts[0]; 2103 pci->n_fts[1] = pcie->of_data->n_fts[1]; 2104 pp = &pci->pp; 2105 pp->num_vectors = MAX_MSI_IRQS; 2106 2107 ret = tegra_pcie_dw_parse_dt(pcie); 2108 if (ret < 0) { 2109 const char *level = KERN_ERR; 2110 2111 if (ret == -EPROBE_DEFER) 2112 level = KERN_DEBUG; 2113 2114 dev_printk(level, dev, 2115 dev_fmt("Failed to parse device tree: %d\n"), 2116 ret); 2117 return ret; 2118 } 2119 2120 ret = tegra_pcie_get_slot_regulators(pcie); 2121 if (ret < 0) { 2122 const char *level = KERN_ERR; 2123 2124 if (ret == -EPROBE_DEFER) 2125 level = KERN_DEBUG; 2126 2127 dev_printk(level, dev, 2128 dev_fmt("Failed to get slot regulators: %d\n"), 2129 ret); 2130 return ret; 2131 } 2132 2133 if (pcie->pex_refclk_sel_gpiod) 2134 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1); 2135 2136 pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); 2137 if (IS_ERR(pcie->pex_ctl_supply)) { 2138 ret = PTR_ERR(pcie->pex_ctl_supply); 2139 if (ret != -EPROBE_DEFER) 2140 dev_err(dev, "Failed to get regulator: %ld\n", 2141 PTR_ERR(pcie->pex_ctl_supply)); 2142 return ret; 2143 } 2144 2145 pcie->core_clk = devm_clk_get(dev, "core"); 2146 if (IS_ERR(pcie->core_clk)) { 2147 dev_err(dev, "Failed to get core clock: %ld\n", 2148 PTR_ERR(pcie->core_clk)); 2149 return PTR_ERR(pcie->core_clk); 2150 } 2151 2152 pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2153 "appl"); 2154 if (!pcie->appl_res) { 2155 dev_err(dev, "Failed to find \"appl\" region\n"); 2156 return -ENODEV; 2157 } 2158 2159 pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res); 2160 if (IS_ERR(pcie->appl_base)) 2161 return PTR_ERR(pcie->appl_base); 2162 2163 pcie->core_apb_rst = devm_reset_control_get(dev, "apb"); 2164 if (IS_ERR(pcie->core_apb_rst)) { 2165 dev_err(dev, "Failed to get APB reset: %ld\n", 2166 PTR_ERR(pcie->core_apb_rst)); 2167 return PTR_ERR(pcie->core_apb_rst); 2168 } 2169 2170 phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL); 2171 if (!phys) 2172 return -ENOMEM; 2173 2174 for (i = 0; i < pcie->phy_count; i++) { 2175 name = kasprintf(GFP_KERNEL, "p2u-%u", i); 2176 if (!name) { 2177 dev_err(dev, "Failed to create P2U string\n"); 2178 return -ENOMEM; 2179 } 2180 phys[i] = devm_phy_get(dev, name); 2181 kfree(name); 2182 if (IS_ERR(phys[i])) { 2183 ret = PTR_ERR(phys[i]); 2184 if (ret != -EPROBE_DEFER) 2185 dev_err(dev, "Failed to get PHY: %d\n", ret); 2186 return ret; 2187 } 2188 } 2189 2190 pcie->phys = phys; 2191 2192 atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2193 "atu_dma"); 2194 if (!atu_dma_res) { 2195 dev_err(dev, "Failed to find \"atu_dma\" region\n"); 2196 return -ENODEV; 2197 } 2198 pcie->atu_dma_res = atu_dma_res; 2199 2200 pci->atu_size = resource_size(atu_dma_res); 2201 pci->atu_base = devm_ioremap_resource(dev, atu_dma_res); 2202 if (IS_ERR(pci->atu_base)) 2203 return PTR_ERR(pci->atu_base); 2204 2205 pcie->core_rst = devm_reset_control_get(dev, "core"); 2206 if (IS_ERR(pcie->core_rst)) { 2207 dev_err(dev, "Failed to get core reset: %ld\n", 2208 PTR_ERR(pcie->core_rst)); 2209 return PTR_ERR(pcie->core_rst); 2210 } 2211 2212 pp->irq = platform_get_irq_byname(pdev, "intr"); 2213 if (pp->irq < 0) 2214 return pp->irq; 2215 2216 pcie->bpmp = tegra_bpmp_get(dev); 2217 if (IS_ERR(pcie->bpmp)) 2218 return PTR_ERR(pcie->bpmp); 2219 2220 platform_set_drvdata(pdev, pcie); 2221 2222 switch (pcie->of_data->mode) { 2223 case DW_PCIE_RC_TYPE: 2224 ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, 2225 IRQF_SHARED, "tegra-pcie-intr", pcie); 2226 if (ret) { 2227 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, 2228 ret); 2229 goto fail; 2230 } 2231 2232 ret = tegra_pcie_config_rp(pcie); 2233 if (ret && ret != -ENOMEDIUM) 2234 goto fail; 2235 else 2236 return 0; 2237 break; 2238 2239 case DW_PCIE_EP_TYPE: 2240 ret = devm_request_threaded_irq(dev, pp->irq, 2241 tegra_pcie_ep_hard_irq, 2242 tegra_pcie_ep_irq_thread, 2243 IRQF_SHARED | IRQF_ONESHOT, 2244 "tegra-pcie-ep-intr", pcie); 2245 if (ret) { 2246 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, 2247 ret); 2248 goto fail; 2249 } 2250 2251 ret = tegra_pcie_config_ep(pcie, pdev); 2252 if (ret < 0) 2253 goto fail; 2254 break; 2255 2256 default: 2257 dev_err(dev, "Invalid PCIe device type %d\n", 2258 pcie->of_data->mode); 2259 } 2260 2261 fail: 2262 tegra_bpmp_put(pcie->bpmp); 2263 return ret; 2264 } 2265 2266 static int tegra_pcie_dw_remove(struct platform_device *pdev) 2267 { 2268 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); 2269 2270 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { 2271 if (!pcie->link_state) 2272 return 0; 2273 2274 debugfs_remove_recursive(pcie->debugfs); 2275 tegra_pcie_deinit_controller(pcie); 2276 pm_runtime_put_sync(pcie->dev); 2277 } else { 2278 disable_irq(pcie->pex_rst_irq); 2279 pex_ep_event_pex_rst_assert(pcie); 2280 } 2281 2282 pm_runtime_disable(pcie->dev); 2283 tegra_bpmp_put(pcie->bpmp); 2284 if (pcie->pex_refclk_sel_gpiod) 2285 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0); 2286 2287 return 0; 2288 } 2289 2290 static int tegra_pcie_dw_suspend_late(struct device *dev) 2291 { 2292 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2293 u32 val; 2294 2295 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { 2296 dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n"); 2297 return -EPERM; 2298 } 2299 2300 if (!pcie->link_state) 2301 return 0; 2302 2303 /* Enable HW_HOT_RST mode */ 2304 if (!pcie->of_data->has_sbr_reset_fix) { 2305 val = appl_readl(pcie, APPL_CTRL); 2306 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << 2307 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 2308 val |= APPL_CTRL_HW_HOT_RST_EN; 2309 appl_writel(pcie, val, APPL_CTRL); 2310 } 2311 2312 return 0; 2313 } 2314 2315 static int tegra_pcie_dw_suspend_noirq(struct device *dev) 2316 { 2317 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2318 2319 if (!pcie->link_state) 2320 return 0; 2321 2322 tegra_pcie_downstream_dev_to_D0(pcie); 2323 tegra_pcie_dw_pme_turnoff(pcie); 2324 tegra_pcie_unconfig_controller(pcie); 2325 2326 return 0; 2327 } 2328 2329 static int tegra_pcie_dw_resume_noirq(struct device *dev) 2330 { 2331 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2332 int ret; 2333 2334 if (!pcie->link_state) 2335 return 0; 2336 2337 ret = tegra_pcie_config_controller(pcie, true); 2338 if (ret < 0) 2339 return ret; 2340 2341 ret = tegra_pcie_dw_host_init(&pcie->pci.pp); 2342 if (ret < 0) { 2343 dev_err(dev, "Failed to init host: %d\n", ret); 2344 goto fail_host_init; 2345 } 2346 2347 dw_pcie_setup_rc(&pcie->pci.pp); 2348 2349 ret = tegra_pcie_dw_start_link(&pcie->pci); 2350 if (ret < 0) 2351 goto fail_host_init; 2352 2353 return 0; 2354 2355 fail_host_init: 2356 tegra_pcie_unconfig_controller(pcie); 2357 return ret; 2358 } 2359 2360 static int tegra_pcie_dw_resume_early(struct device *dev) 2361 { 2362 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2363 u32 val; 2364 2365 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { 2366 dev_err(dev, "Suspend is not supported in EP mode"); 2367 return -ENOTSUPP; 2368 } 2369 2370 if (!pcie->link_state) 2371 return 0; 2372 2373 /* Disable HW_HOT_RST mode */ 2374 if (!pcie->of_data->has_sbr_reset_fix) { 2375 val = appl_readl(pcie, APPL_CTRL); 2376 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << 2377 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 2378 val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << 2379 APPL_CTRL_HW_HOT_RST_MODE_SHIFT; 2380 val &= ~APPL_CTRL_HW_HOT_RST_EN; 2381 appl_writel(pcie, val, APPL_CTRL); 2382 } 2383 2384 return 0; 2385 } 2386 2387 static void tegra_pcie_dw_shutdown(struct platform_device *pdev) 2388 { 2389 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); 2390 2391 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { 2392 if (!pcie->link_state) 2393 return; 2394 2395 debugfs_remove_recursive(pcie->debugfs); 2396 tegra_pcie_downstream_dev_to_D0(pcie); 2397 2398 disable_irq(pcie->pci.pp.irq); 2399 if (IS_ENABLED(CONFIG_PCI_MSI)) 2400 disable_irq(pcie->pci.pp.msi_irq[0]); 2401 2402 tegra_pcie_dw_pme_turnoff(pcie); 2403 tegra_pcie_unconfig_controller(pcie); 2404 pm_runtime_put_sync(pcie->dev); 2405 } else { 2406 disable_irq(pcie->pex_rst_irq); 2407 pex_ep_event_pex_rst_assert(pcie); 2408 } 2409 } 2410 2411 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = { 2412 .version = TEGRA194_DWC_IP_VER, 2413 .mode = DW_PCIE_RC_TYPE, 2414 .cdm_chk_int_en_bit = BIT(19), 2415 /* Gen4 - 5, 6, 8 and 9 presets enabled */ 2416 .gen4_preset_vec = 0x360, 2417 .n_fts = { 52, 52 }, 2418 }; 2419 2420 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = { 2421 .version = TEGRA194_DWC_IP_VER, 2422 .mode = DW_PCIE_EP_TYPE, 2423 .cdm_chk_int_en_bit = BIT(19), 2424 /* Gen4 - 5, 6, 8 and 9 presets enabled */ 2425 .gen4_preset_vec = 0x360, 2426 .n_fts = { 52, 52 }, 2427 }; 2428 2429 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = { 2430 .version = TEGRA234_DWC_IP_VER, 2431 .mode = DW_PCIE_RC_TYPE, 2432 .has_msix_doorbell_access_fix = true, 2433 .has_sbr_reset_fix = true, 2434 .has_l1ss_exit_fix = true, 2435 .cdm_chk_int_en_bit = BIT(18), 2436 /* Gen4 - 6, 8 and 9 presets enabled */ 2437 .gen4_preset_vec = 0x340, 2438 .n_fts = { 52, 80 }, 2439 }; 2440 2441 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = { 2442 .version = TEGRA234_DWC_IP_VER, 2443 .mode = DW_PCIE_EP_TYPE, 2444 .has_l1ss_exit_fix = true, 2445 .has_ltr_req_fix = true, 2446 .cdm_chk_int_en_bit = BIT(18), 2447 /* Gen4 - 6, 8 and 9 presets enabled */ 2448 .gen4_preset_vec = 0x340, 2449 .n_fts = { 52, 80 }, 2450 }; 2451 2452 static const struct of_device_id tegra_pcie_dw_of_match[] = { 2453 { 2454 .compatible = "nvidia,tegra194-pcie", 2455 .data = &tegra194_pcie_dw_rc_of_data, 2456 }, 2457 { 2458 .compatible = "nvidia,tegra194-pcie-ep", 2459 .data = &tegra194_pcie_dw_ep_of_data, 2460 }, 2461 { 2462 .compatible = "nvidia,tegra234-pcie", 2463 .data = &tegra234_pcie_dw_rc_of_data, 2464 }, 2465 { 2466 .compatible = "nvidia,tegra234-pcie-ep", 2467 .data = &tegra234_pcie_dw_ep_of_data, 2468 }, 2469 {} 2470 }; 2471 2472 static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { 2473 .suspend_late = tegra_pcie_dw_suspend_late, 2474 .suspend_noirq = tegra_pcie_dw_suspend_noirq, 2475 .resume_noirq = tegra_pcie_dw_resume_noirq, 2476 .resume_early = tegra_pcie_dw_resume_early, 2477 }; 2478 2479 static struct platform_driver tegra_pcie_dw_driver = { 2480 .probe = tegra_pcie_dw_probe, 2481 .remove = tegra_pcie_dw_remove, 2482 .shutdown = tegra_pcie_dw_shutdown, 2483 .driver = { 2484 .name = "tegra194-pcie", 2485 .pm = &tegra_pcie_dw_pm_ops, 2486 .of_match_table = tegra_pcie_dw_of_match, 2487 }, 2488 }; 2489 module_platform_driver(tegra_pcie_dw_driver); 2490 2491 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); 2492 2493 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); 2494 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); 2495 MODULE_LICENSE("GPL v2"); 2496