// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the following SoCs
 * Tegra194
 * Tegra234
 *
 * Copyright (C) 2019-2022 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define TEGRA194_DWC_IP_VER			0x490A
#define TEGRA234_DWC_IP_VER			0x562A

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

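/*
 * The APPL_* offsets above are Tegra-specific application-logic (wrapper)
 * registers accessed through appl_readl()/appl_writel(). The offsets below
 * live in the DesignWare configuration/DBI space and are accessed with the
 * dw_pcie_*_dbi() helpers.
 */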
#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE	BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)

#define N_FTS_VAL					52
#define FTS_VAL						52

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1

static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,	/* PCI_EXP_LNKSTA_CLS == 0; undefined */
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};

struct tegra_pcie_dw_of_data {
	u32 version;
	enum dw_pcie_device_mode mode;
	bool has_msix_doorbell_access_fix;
	bool has_sbr_reset_fix;
	bool has_l1ss_exit_fix;
	bool has_ltr_req_fix;
	u32 cdm_chk_int_en_bit;
	u32 gen4_preset_vec;
	u8 n_fts[2];
};

struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	struct tegra_pcie_dw_of_data *of_data;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool enable_srns;
	bool link_state;
	bool update_fc_fixup;
	bool enable_ext_refclk;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 ras_des_cap;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
	long link_status;
	struct icc_path *icc_path;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

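/*
 * tegra_pcie_icc_set() below converts the negotiated link speed and width
 * into an interconnect bandwidth request. As an illustrative calculation
 * only: for a Gen3 x4 link, PCIE_SPEED2MBS_ENC() yields 8000 * 128/130
 * ~= 7877 Mb/s per lane; divided by BITS_PER_BYTE that is ~984 MB/s per
 * lane, so roughly 3.9 GB/s is requested for the four lanes. The core
 * clock is then scaled to match the negotiated generation.
 */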
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed, width;

	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);

	val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);

	if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
		dev_err(pcie->dev, "can't set bw[%u]\n", val);

	if (speed >= ARRAY_SIZE(pcie_gen_freq))
		speed = 0;

	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
}

static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link as such is not
	 * stable anyway, we do not wait to confirm that the link is really
	 * transitioning to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (!pcie->of_data->has_sbr_reset_fix &&
		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}

static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

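/*
 * In the thread handler below, the LTR message written to APPL_LTR_MSG_1
 * encodes a latency tolerance of roughly 110 us for both the snoop and
 * no-snoop requirements: the value 110 combined with LTR scale code 2
 * (1024 ns granularity per the PCIe spec) gives 110 * 1024 ns ~= 112.6 us,
 * and the same encoding is replicated into the upper half-word via
 * LTR_MST_NO_SNOOP_SHIFT.
 */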
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	if (test_and_clear_bit(0, &pcie->link_status))
		dw_pcie_ep_linkup(ep);

	tegra_pcie_icc_set(pcie);

	if (pcie->of_data->has_ltr_req_fix)
		return IRQ_HANDLED;

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				set_bit(0, &pcie->link_status);
				return IRQ_WAKE_THREAD;
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in Root Port mode, and the
	 * system hangs when it is accessed with the link in the ASPM L1
	 * state. So skip accessing it altogether.
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in Root Port mode, and the
	 * system hangs when it is accessed with the link in the ASPM L1
	 * state. So skip accessing it altogether.
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}

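/*
 * aspm_state_cnt() below reports the ASPM entry counters through debugfs.
 * event_counter_prog() selects an event from group 5 of the DesignWare
 * RAS DES vendor capability (the group carrying the Tx/Rx L0s, L1 and
 * L1 substate entry events used here), enables the counter and reads it
 * back; after printing, all counters are cleared and counting re-armed.
 */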
static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}

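/*
 * init_host_aspm() below programs the L1 PM Substates capability: the
 * shifts used (8 for T_cmrt, 19 for T_pwr_on) place the DT-provided values
 * into the PCI_L1SS_CAP_CM_RESTORE_TIME and PCI_L1SS_CAP_P_PWR_ON_VALUE
 * fields that are masked out just before.
 */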
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
	}

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= pcie->of_data->cdm_chk_int_en_bit;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

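/*
 * config_gen3_gen4_eq_presets() below seeds every lane with preset 5
 * (GEN3_GEN4_EQ_PRESET_INIT) in both the 8 GT/s lane equalization
 * registers and the 16 GT/s PCI_PL_16GT_LE_CTRL registers, then programs
 * GEN3_EQ_CONTROL_OFF twice: once with the rate shadow register selecting
 * the 8 GT/s data rate (preset request vector 0x3ff, i.e. presets P0-P9)
 * and once for the 16 GT/s data rate using the SoC-specific
 * gen4_preset_vec value.
 */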
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (pcie->of_data->gen4_preset_vec <<
		GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_16;

	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable 0xFFFF0001 as the response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, offset, tmp;
	bool retry = true;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

retry_link:
	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * There are some endpoints which can't get the link up if
		 * the root port has the Data Link Feature (DLF) enabled.
		 * Refer to Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more
		 * info on Scaled Flow Control and DLF.
		 * So, we need to confirm that this is indeed the case here
		 * and attempt link up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL\n");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	tegra_pcie_icc_set(pcie);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.host_init = tegra_pcie_dw_host_init,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n", ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	/* RP using an external REFCLK is supported only in Tegra234 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
			pcie->enable_ext_refclk = true;
	} else {
		pcie->enable_ext_refclk =
			of_property_read_bool(pcie->dev->of_node,
					      "nvidia,enable-ext-refclk");
	}

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
		pcie->enable_srns =
			of_property_read_bool(np, "nvidia,enable-srns");

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

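/*
 * A minimal, illustrative device tree fragment matching the properties
 * parsed above (the values and node/label names are placeholders, not a
 * validated board configuration):
 *
 *	pcie@14100000 {
 *		...
 *		num-lanes = <4>;
 *		nvidia,bpmp = <&bpmp 1>;	(cell 1 is the controller ID)
 *		nvidia,aspm-cmrt-us = <60>;
 *		nvidia,aspm-pwr-on-t-us = <20>;
 *		nvidia,aspm-l0s-entrance-latency-us = <3>;
 *		phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
 *		supports-clkreq;
 *	};
 */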
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/*
	 * Controller-5 doesn't need to have its state set by BPMP-FW in
	 * Tegra194
	 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

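/*
 * Both helpers above marshal an MRQ_UPHY request to the BPMP firmware,
 * which owns the UPHY block: one toggles per-controller state, the other
 * the EP-mode UPHY PLL. tegra_bpmp_transfer() blocks until the firmware
 * answers, so these are only called from sleepable context.
 */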
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * The link doesn't go into the L2 state with some of the endpoints
	 * on Tegra if they are not in the D0 state. So, we need to make sure
	 * that the immediate downstream devices are in the D0 state before
	 * sending PME_TurnOff to put the link into the L2 state.
	 * This is as per PCI Express Base r4.0 v1.0 September 27, 2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}

static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}

static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}

static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}

static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
			goto fail_pll_init;
		}
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (pcie->enable_srns || pcie->enable_ext_refclk) {
		/*
		 * When the Tegra PCIe RP is using an external clock, it
		 * cannot supply the same clock to its downstream hierarchy.
		 * Hence, gate the PCIe RP REFCLK out pads when the RP & EP
		 * are using separate clocks or the RP is using an external
		 * REFCLK.
		 */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	if (pcie->enable_ext_refclk)
		tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}

static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret)
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

	tegra_pcie_disable_slot_regulators(pcie);

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
		if (ret)
			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
			pcie->cid, ret);
}

static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

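/*
 * L2 entry sequence used below: tegra_pcie_try_link_l2() broadcasts
 * PME_Turn_Off by setting APPL_PM_XMT_TURNOFF_STATE and then polls
 * APPL_DEBUG for the "link in L2" latch for up to PME_ACK_TIMEOUT
 * (10 ms), i.e. until the downstream device has acknowledged with
 * PME_TO_Ack and the link has settled in L2.
 */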
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
					 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
					 1, PME_ACK_TIMEOUT);
}

static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
		return;
	}

	/*
	 * The PCIe controller exits from L2 only if reset is applied, so the
	 * controller doesn't handle interrupts. But in cases where the L2
	 * entry fails, PERST# is asserted, which can trigger a surprise link
	 * down AER. However, this function call happens in suspend_noirq(),
	 * so the AER interrupt will not be processed. Disable all interrupts
	 * to avoid such a scenario.
	 */
	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * The TX lane clock frequency will reset to Gen1 only if the
		 * link is in the L2 or detect state.
		 * So apply pex_rst to the endpoint to force the RP to go
		 * into the detect state.
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		/*
		 * Some cards do not go to the detect state even after
		 * de-asserting PERST#. So, disable the LTSSM to bring the
		 * link to the detect state.
		 */
		data = readl(pcie->appl_base + APPL_CTRL);
		data &= ~APPL_CTRL_LTSSM_EN;
		writel(data, pcie->appl_base + APPL_CTRL);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						  APPL_DEBUG_LTSSM_STATE_MASK) >>
						 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err)
			dev_info(pcie->dev, "Link didn't go to detect state\n");
	}
	/*
	 * DBI registers may not be accessible after this, as PLL-E would be
	 * down depending on how CLKREQ is pulled by the endpoint.
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}

static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}

static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize controller: %d\n", ret);
		goto fail_pm_get_sync;
	}

	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
APPL_CTRL); 1709 val &= ~APPL_CTRL_LTSSM_EN; 1710 appl_writel(pcie, val, APPL_CTRL); 1711 1712 ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, 1713 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> 1714 APPL_DEBUG_LTSSM_STATE_SHIFT) == 1715 LTSSM_STATE_PRE_DETECT, 1716 1, LTSSM_TIMEOUT); 1717 if (ret) 1718 dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); 1719 1720 reset_control_assert(pcie->core_rst); 1721 1722 tegra_pcie_disable_phy(pcie); 1723 1724 reset_control_assert(pcie->core_apb_rst); 1725 1726 clk_disable_unprepare(pcie->core_clk); 1727 1728 pm_runtime_put_sync(pcie->dev); 1729 1730 if (pcie->enable_ext_refclk) { 1731 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1732 if (ret) 1733 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", 1734 ret); 1735 } 1736 1737 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1738 if (ret) 1739 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); 1740 1741 pcie->ep_state = EP_STATE_DISABLED; 1742 dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); 1743 } 1744 1745 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) 1746 { 1747 struct dw_pcie *pci = &pcie->pci; 1748 struct dw_pcie_ep *ep = &pci->ep; 1749 struct device *dev = pcie->dev; 1750 u32 val; 1751 int ret; 1752 u16 val_16; 1753 1754 if (pcie->ep_state == EP_STATE_ENABLED) 1755 return; 1756 1757 ret = pm_runtime_resume_and_get(dev); 1758 if (ret < 0) { 1759 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", 1760 ret); 1761 return; 1762 } 1763 1764 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); 1765 if (ret) { 1766 dev_err(pcie->dev, "Failed to enable controller %u: %d\n", 1767 pcie->cid, ret); 1768 goto fail_set_ctrl_state; 1769 } 1770 1771 if (pcie->enable_ext_refclk) { 1772 ret = tegra_pcie_bpmp_set_pll_state(pcie, true); 1773 if (ret) { 1774 dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", 1775 ret); 1776 goto fail_pll_init; 1777 } 1778 } 1779 1780 ret = clk_prepare_enable(pcie->core_clk); 1781 if (ret) { 1782 dev_err(dev, "Failed to enable core clock: %d\n", ret); 1783 goto fail_core_clk_enable; 1784 } 1785 1786 ret = reset_control_deassert(pcie->core_apb_rst); 1787 if (ret) { 1788 dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); 1789 goto fail_core_apb_rst; 1790 } 1791 1792 ret = tegra_pcie_enable_phy(pcie); 1793 if (ret) { 1794 dev_err(dev, "Failed to enable PHY: %d\n", ret); 1795 goto fail_phy; 1796 } 1797 1798 /* Clear any stale interrupt statuses */ 1799 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); 1800 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); 1801 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); 1802 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); 1803 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); 1804 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); 1805 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); 1806 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); 1807 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); 1808 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); 1809 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); 1810 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); 1811 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); 1812 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); 1813 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); 1814 1815 /* configure this core for EP mode operation */ 1816 val = appl_readl(pcie, APPL_DM_TYPE); 1817 val &= ~APPL_DM_TYPE_MASK; 1818 val |= APPL_DM_TYPE_EP; 1819 
static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;
	u16 val_16;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
			pcie->cid, ret);
		goto fail_set_ctrl_state;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
				ret);
			goto fail_pll_init;
		}
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* Configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Program the private control to allow sending LTR upstream */
	if (pcie->of_data->has_ltr_req_fix) {
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);
	}

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Endpoint initialization completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
fail_set_ctrl_state:
	pm_runtime_put_sync(dev);
}

static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}

static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}

static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}

static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}

static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}
}

static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};
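
/*
 * In EP mode, PERST# is routed to a GPIO. The threaded handler above
 * mirrors the pin state into controller state: PERST# asserted tears
 * the controller down, PERST# de-asserted brings it back up.
 */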
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}
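
/*
 * tegra_pcie_dw_probe() - Common probe path for RC and EP mode.
 *
 * Acquires all resources (regulators, clocks, resets, "appl" and
 * "atu_dma" regions, P2U PHYs, BPMP handle, interconnect path) and then
 * branches on the match data: RC mode requests the controller IRQ and
 * configures the root port, EP mode installs the threaded controller
 * IRQ and defers bring-up until PERST# is de-asserted.
 */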
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct phy **phys;
	char *name;
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = &pdev->dev;
	pci->ops = &tegra_dw_pcie_ops;
	pcie->dev = &pdev->dev;
	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
	pci->n_fts[0] = pcie->of_data->n_fts[0];
	pci->n_fts[1] = pcie->of_data->n_fts[1];
	pp = &pci->pp;
	pp->num_vectors = MAX_MSI_IRQS;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get regulator: %ld\n",
				PTR_ERR(pcie->pex_ctl_supply));
		return ret;
	}

	pcie->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(pcie->core_clk)) {
		dev_err(dev, "Failed to get core clock: %ld\n",
			PTR_ERR(pcie->core_clk));
		return PTR_ERR(pcie->core_clk);
	}

	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "appl");
	if (!pcie->appl_res) {
		dev_err(dev, "Failed to find \"appl\" region\n");
		return -ENODEV;
	}

	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
	if (IS_ERR(pcie->appl_base))
		return PTR_ERR(pcie->appl_base);

	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(pcie->core_apb_rst)) {
		dev_err(dev, "Failed to get APB reset: %ld\n",
			PTR_ERR(pcie->core_apb_rst));
		return PTR_ERR(pcie->core_apb_rst);
	}

	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	for (i = 0; i < pcie->phy_count; i++) {
		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
		if (!name) {
			dev_err(dev, "Failed to create P2U string\n");
			return -ENOMEM;
		}
		phys[i] = devm_phy_get(dev, name);
		kfree(name);
		if (IS_ERR(phys[i])) {
			ret = PTR_ERR(phys[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get PHY: %d\n", ret);
			return ret;
		}
	}

	pcie->phys = phys;

	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "atu_dma");
	if (!atu_dma_res) {
		dev_err(dev, "Failed to find \"atu_dma\" region\n");
		return -ENODEV;
	}
	pcie->atu_dma_res = atu_dma_res;

	pci->atu_size = resource_size(atu_dma_res);
	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);

	pcie->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(pcie->core_rst)) {
		dev_err(dev, "Failed to get core reset: %ld\n",
			PTR_ERR(pcie->core_rst));
		return PTR_ERR(pcie->core_rst);
	}

	pp->irq = platform_get_irq_byname(pdev, "intr");
	if (pp->irq < 0)
		return pp->irq;

	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

	pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
	ret = PTR_ERR_OR_ZERO(pcie->icc_path);
	if (ret) {
		tegra_bpmp_put(pcie->bpmp);
		dev_err_probe(&pdev->dev, ret,
			      "Failed to get write interconnect\n");
		return ret;
	}

	switch (pcie->of_data->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;
		else
			return 0;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;
		else
			return 0;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n",
			pcie->of_data->mode);
		ret = -EINVAL;
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
	return ret;
}

static void tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_deinit_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}

	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
}
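
/*
 * System sleep is only supported in RC mode. suspend_late() arms the
 * hardware hot-reset mode before interrupts are disabled, and
 * suspend_noirq() puts downstream devices into D0, sends PME_Turn_Off
 * and unconfigures the controller; the resume callbacks undo this in
 * reverse order.
 */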
static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Failed to suspend: Tegra PCIe is in EP mode\n");
		return -EPERM;
	}

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}

static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (!pcie->link_state)
		return 0;

	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);

	return 0;
}

static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	dw_pcie_setup_rc(&pcie->pci.pp);

	ret = tegra_pcie_dw_start_link(&pcie->pci);
	if (ret < 0)
		goto fail_host_init;

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

static int tegra_pcie_dw_resume_early(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Suspend is not supported in EP mode\n");
		return -ENOTSUPP;
	}

	if (!pcie->link_state)
		return 0;

	/* Disable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
		       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
		val &= ~APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}

static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_downstream_dev_to_D0(pcie);

		disable_irq(pcie->pci.pp.irq);
		if (IS_ENABLED(CONFIG_PCI_MSI))
			disable_irq(pcie->pci.pp.msi_irq[0]);

		tegra_pcie_dw_pme_turnoff(pcie);
		tegra_pcie_unconfig_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}
}
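
/*
 * Per-SoC match data. Tegra234 carries hardware fixes that Tegra194
 * lacks, so several workaround paths (CDM check interrupt bit position,
 * SBR reset and L1SS exit handling, LTR request programming) are keyed
 * off these flags, along with the Gen4 preset vector and N_FTS values
 * used during link training.
 */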
static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
	.version = TEGRA194_DWC_IP_VER,
	.mode = DW_PCIE_RC_TYPE,
	.cdm_chk_int_en_bit = BIT(19),
	/* Gen4 - 5, 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x360,
	.n_fts = { 52, 52 },
};

static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
	.version = TEGRA194_DWC_IP_VER,
	.mode = DW_PCIE_EP_TYPE,
	.cdm_chk_int_en_bit = BIT(19),
	/* Gen4 - 5, 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x360,
	.n_fts = { 52, 52 },
};

static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
	.version = TEGRA234_DWC_IP_VER,
	.mode = DW_PCIE_RC_TYPE,
	.has_msix_doorbell_access_fix = true,
	.has_sbr_reset_fix = true,
	.has_l1ss_exit_fix = true,
	.cdm_chk_int_en_bit = BIT(18),
	/* Gen4 - 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x340,
	.n_fts = { 52, 80 },
};

static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
	.version = TEGRA234_DWC_IP_VER,
	.mode = DW_PCIE_EP_TYPE,
	.has_l1ss_exit_fix = true,
	.has_ltr_req_fix = true,
	.cdm_chk_int_en_bit = BIT(18),
	/* Gen4 - 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x340,
	.n_fts = { 52, 80 },
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra194_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra194_pcie_dw_ep_of_data,
	},
	{
		.compatible = "nvidia,tegra234-pcie",
		.data = &tegra234_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra234-pcie-ep",
		.data = &tegra234_pcie_dw_ep_of_data,
	},
	{}
};

static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};

static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove_new = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name = "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");