1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * PCIe host controller driver for the following SoCs 4 * Tegra194 5 * Tegra234 6 * 7 * Copyright (C) 2019-2022 NVIDIA Corporation. 8 * 9 * Author: Vidya Sagar <vidyas@nvidia.com> 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/debugfs.h> 14 #include <linux/delay.h> 15 #include <linux/gpio.h> 16 #include <linux/gpio/consumer.h> 17 #include <linux/interrupt.h> 18 #include <linux/iopoll.h> 19 #include <linux/kernel.h> 20 #include <linux/module.h> 21 #include <linux/of.h> 22 #include <linux/of_device.h> 23 #include <linux/of_gpio.h> 24 #include <linux/of_pci.h> 25 #include <linux/pci.h> 26 #include <linux/phy/phy.h> 27 #include <linux/pinctrl/consumer.h> 28 #include <linux/platform_device.h> 29 #include <linux/pm_runtime.h> 30 #include <linux/random.h> 31 #include <linux/reset.h> 32 #include <linux/resource.h> 33 #include <linux/types.h> 34 #include "pcie-designware.h" 35 #include <soc/tegra/bpmp.h> 36 #include <soc/tegra/bpmp-abi.h> 37 #include "../../pci.h" 38 39 #define TEGRA194_DWC_IP_VER 0x490A 40 #define TEGRA234_DWC_IP_VER 0x562A 41 42 #define APPL_PINMUX 0x0 43 #define APPL_PINMUX_PEX_RST BIT(0) 44 #define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2) 45 #define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3) 46 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4) 47 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5) 48 49 #define APPL_CTRL 0x4 50 #define APPL_CTRL_SYS_PRE_DET_STATE BIT(6) 51 #define APPL_CTRL_LTSSM_EN BIT(7) 52 #define APPL_CTRL_HW_HOT_RST_EN BIT(20) 53 #define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0) 54 #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22 55 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1 56 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2 57 58 #define APPL_INTR_EN_L0_0 0x8 59 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) 60 #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) 61 #define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) 62 #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15) 63 #define 
APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) 64 #define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) 65 #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) 66 67 #define APPL_INTR_STATUS_L0 0xC 68 #define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) 69 #define APPL_INTR_STATUS_L0_INT_INT BIT(8) 70 #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15) 71 #define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16) 72 #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) 73 74 #define APPL_INTR_EN_L1_0_0 0x1C 75 #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) 76 #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3) 77 #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30) 78 79 #define APPL_INTR_STATUS_L1_0_0 0x20 80 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) 81 #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3) 82 #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30) 83 84 #define APPL_INTR_STATUS_L1_1 0x2C 85 #define APPL_INTR_STATUS_L1_2 0x30 86 #define APPL_INTR_STATUS_L1_3 0x34 87 #define APPL_INTR_STATUS_L1_6 0x3C 88 #define APPL_INTR_STATUS_L1_7 0x40 89 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1) 90 91 #define APPL_INTR_EN_L1_8_0 0x44 92 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) 93 #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3) 94 #define APPL_INTR_EN_L1_8_INTX_EN BIT(11) 95 #define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15) 96 97 #define APPL_INTR_STATUS_L1_8_0 0x4C 98 #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6) 99 #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2) 100 #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3) 101 102 #define APPL_INTR_STATUS_L1_9 0x54 103 #define APPL_INTR_STATUS_L1_10 0x58 104 #define APPL_INTR_STATUS_L1_11 0x64 105 #define APPL_INTR_STATUS_L1_13 0x74 106 #define APPL_INTR_STATUS_L1_14 0x78 107 #define APPL_INTR_STATUS_L1_15 0x7C 108 #define APPL_INTR_STATUS_L1_17 0x88 109 110 #define APPL_INTR_EN_L1_18 0x90 111 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2) 112 #define 
APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) 113 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) 114 115 #define APPL_INTR_STATUS_L1_18 0x94 116 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2) 117 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) 118 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) 119 120 #define APPL_MSI_CTRL_1 0xAC 121 122 #define APPL_MSI_CTRL_2 0xB0 123 124 #define APPL_LEGACY_INTX 0xB8 125 126 #define APPL_LTR_MSG_1 0xC4 127 #define LTR_MSG_REQ BIT(15) 128 #define LTR_MST_NO_SNOOP_SHIFT 16 129 130 #define APPL_LTR_MSG_2 0xC8 131 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3) 132 133 #define APPL_LINK_STATUS 0xCC 134 #define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0) 135 136 #define APPL_DEBUG 0xD0 137 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21) 138 #define APPL_DEBUG_PM_LINKST_IN_L0 0x11 139 #define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3) 140 #define APPL_DEBUG_LTSSM_STATE_SHIFT 3 141 #define LTSSM_STATE_PRE_DETECT 5 142 143 #define APPL_RADM_STATUS 0xE4 144 #define APPL_PM_XMT_TURNOFF_STATE BIT(0) 145 146 #define APPL_DM_TYPE 0x100 147 #define APPL_DM_TYPE_MASK GENMASK(3, 0) 148 #define APPL_DM_TYPE_RP 0x4 149 #define APPL_DM_TYPE_EP 0x0 150 151 #define APPL_CFG_BASE_ADDR 0x104 152 #define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12) 153 154 #define APPL_CFG_IATU_DMA_BASE_ADDR 0x108 155 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18) 156 157 #define APPL_CFG_MISC 0x110 158 #define APPL_CFG_MISC_SLV_EP_MODE BIT(14) 159 #define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10) 160 #define APPL_CFG_MISC_ARCACHE_SHIFT 10 161 #define APPL_CFG_MISC_ARCACHE_VAL 3 162 163 #define APPL_CFG_SLCG_OVERRIDE 0x114 164 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0) 165 166 #define APPL_CAR_RESET_OVRD 0x12C 167 #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0) 168 169 #define IO_BASE_IO_DECODE BIT(0) 170 #define IO_BASE_IO_DECODE_BIT8 BIT(8) 171 172 #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0) 173 #define 
CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16) 174 175 #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718 176 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19) 177 178 #define N_FTS_VAL 52 179 #define FTS_VAL 52 180 181 #define GEN3_EQ_CONTROL_OFF 0x8a8 182 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 183 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) 184 #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) 185 186 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 187 #define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 188 #define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) 189 #define AMBA_ERROR_RESPONSE_CRS_OKAY 0 190 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 191 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 192 193 #define MSIX_ADDR_MATCH_LOW_OFF 0x940 194 #define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0) 195 #define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2) 196 197 #define MSIX_ADDR_MATCH_HIGH_OFF 0x944 198 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0) 199 200 #define PORT_LOGIC_MSIX_DOORBELL 0x948 201 202 #define CAP_SPCIE_CAP_OFF 0x154 203 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0) 204 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8) 205 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8 206 207 #define PME_ACK_TIMEOUT 10000 208 209 #define LTSSM_TIMEOUT 50000 /* 50ms */ 210 211 #define GEN3_GEN4_EQ_PRESET_INIT 5 212 213 #define GEN1_CORE_CLK_FREQ 62500000 214 #define GEN2_CORE_CLK_FREQ 125000000 215 #define GEN3_CORE_CLK_FREQ 250000000 216 #define GEN4_CORE_CLK_FREQ 500000000 217 218 #define LTR_MSG_TIMEOUT (100 * 1000) 219 220 #define PERST_DEBOUNCE_TIME (5 * 1000) 221 222 #define EP_STATE_DISABLED 0 223 #define EP_STATE_ENABLED 1 224 225 static const unsigned int pcie_gen_freq[] = { 226 GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */ 227 GEN1_CORE_CLK_FREQ, 228 GEN2_CORE_CLK_FREQ, 229 GEN3_CORE_CLK_FREQ, 230 GEN4_CORE_CLK_FREQ 231 }; 232 233 struct tegra_pcie_dw_of_data { 234 u32 version; 235 enum 
dw_pcie_device_mode mode;
	bool has_msix_doorbell_access_fix;
	bool has_sbr_reset_fix;
	bool has_l1ss_exit_fix;
	bool has_ltr_req_fix;
	u32 cdm_chk_int_en_bit;
	u32 gen4_preset_vec;
	u8 n_fts[2];
};

/*
 * Per-controller driver state. Embeds the DesignWare core's struct dw_pcie
 * (so to_tegra_pcie() can recover it via container_of()) and caches DT
 * properties, clock/reset/regulator handles and cached capability offsets.
 */
struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;	/* APPL aperture, accessed via appl_readl()/appl_writel() */
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;		/* must stay embedded: container_of() anchor */
	struct tegra_bpmp *bpmp;

	struct tegra_pcie_dw_of_data *of_data;	/* per-SoC quirks/fixes table */

	bool supports_clkreq;
	bool enable_cdm_check;
	bool enable_srns;
	bool link_state;
	bool update_fc_fixup;
	bool enable_ext_refclk;
	u8 init_link_width;	/* negotiated link width sampled at init; compared on bandwidth-management IRQ */
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;		/* BPMP controller ID from "nvidia,bpmp" DT property */
	u32 cfg_link_cap_l1sub;	/* DBI offset of L1SS capability's PCI_L1SS_CAP register */
	u32 ras_des_cap;	/* DBI offset of RAS DES vendor-specific capability */
	u32 pcie_cap_base;	/* DBI offset of the PCIe capability */
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
	long link_status;	/* bit 0: link-up event latched in hard IRQ, consumed in threaded IRQ */
};

/* Recover the driver state wrapping an embedded struct dw_pcie. */
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

/* Write a 32-bit APPL register (relaxed: no barrier implied). */
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

/* Read a 32-bit APPL register (relaxed: no barrier implied). */
static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
	struct dw_pcie
*pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE:- Since this scenario is uncommon and link as such is not
	 * stable anyway, not waiting to confirm if link is really
	 * transitioning to Gen-2 speed
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			/* Force target link speed down to 2.5 GT/s ... */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			/* ... then trigger link retrain */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

/*
 * Root-port mode interrupt handler: dispatches on the level-0 status and
 * services link-state changes (SBR/surprise-link-down workaround), bandwidth
 * management/autonomous-bandwidth events, and CDM register-check results.
 */
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (!pcie->of_data->has_sbr_reset_fix &&
		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			/* Latch LBMS in the Link Status register */
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		/* Write-back acknowledges the CDM check result bits */
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}

static void
pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	/* Clear every latched interrupt status before re-enabling LTSSM */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	/* Restart link training after the hot reset completed */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

/*
 * Endpoint-mode threaded IRQ: notifies the DWC EP core on link-up, scales
 * the core clock to the negotiated link speed, and (unless the SoC has the
 * LTR request fix) forwards an LTR message upstream once BME is set and the
 * host advertises ASPM L1 substates.
 */
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	if (test_and_clear_bit(0, &pcie->link_status))
		dw_pcie_ep_linkup(ep);

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;

	/* Out-of-range CLS: fall back to the Gen-1 clock rate */
	if (speed >= ARRAY_SIZE(pcie_gen_freq))
		speed = 0;

	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);

	if (pcie->of_data->has_ltr_req_fix)
		return IRQ_HANDLED;

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		/* Poll until the request bit self-clears or we time out */
		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

/*
 * Endpoint-mode hard IRQ: acknowledges hot-reset-done and link-up events
 * (waking the IRQ thread for the latter), and BME changes. Anything else is
 * reported as spurious.
 */
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				set_bit(0, &pcie->link_status);
				return IRQ_WAKE_THREAD;
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

/*
 * Config read for the root port's own config space. Filters out the
 * MSI-X doorbell register on affected SoCs (see comment below).
 */
static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint mode specific register happen to appear even
	 * when controller is operating in root port mode and system hangs
	 * when it is accessed with link being in ASPM-L1 state.
	 * So skip accessing it altogether
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

/*
 * Config write counterpart of tegra_pcie_dw_rd_own_conf(): silently drops
 * writes to the MSI-X doorbell register on affected SoCs.
 */
static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint mode specific register happen to appear even
	 * when controller is operating in root port mode and system hangs
	 * when it is accessed with link being in ASPM-L1 state.
	 * So skip accessing it altogether
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
/* Clear the ASPM L1.1 support bit in the advertised L1SS capability. */
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

/* Clear the ASPM L1.2 support bit in the advertised L1SS capability. */
static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

/*
 * Select @event in the RAS DES event-counter control register and return
 * the current counter value for it.
 */
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}

/*
 * debugfs "aspm_state_cnt" show handler: prints the L0s/L1/L1.1/L1.2 entry
 * counters, then clears and re-arms all counters.
 */
static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}

/*
 * Locate the L1SS and RAS DES capabilities, enable the ASPM event counters,
 * and program the DT-provided ASPM timing parameters (T_cmrt, T_pwr_on,
 * L0s/L1 entrance latencies) into the DBI registers.
 */
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

/* Expose the ASPM state counters under this controller's debugfs dir. */
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
/* Stubs when CONFIG_PCIEASPM is disabled */
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

/*
 * Enable link-state (and optionally SBR and CDM-check) interrupts, record
 * the initial negotiated link width, and enable the link-bandwidth-
 * management interrupt in the Link Control register.
 */
static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
	}

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= pcie->of_data->cdm_chk_int_en_bit;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void
tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

/* Enable MSI reception/forwarding interrupts at level 0. */
static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

/*
 * Clear all latched interrupt statuses, then enable system, legacy and
 * (when CONFIG_PCI_MSI) MSI interrupts for root-port operation.
 */
static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

/*
 * Program per-lane Gen3/Gen4 equalization presets (both the secondary PCIe
 * capability and the 16 GT/s lane equalization control registers), then set
 * the preset request vectors for the Gen3 and Gen4 data rates.
 */
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	/* Select the Gen3 rate shadow registers */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &=
~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; 858 val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT); 859 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 860 861 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); 862 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; 863 val |= (pcie->of_data->gen4_preset_vec << 864 GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); 865 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; 866 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); 867 868 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 869 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK; 870 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 871 } 872 873 static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp) 874 { 875 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 876 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 877 u32 val; 878 u16 val_16; 879 880 pp->bridge->ops = &tegra_pci_ops; 881 882 if (!pcie->pcie_cap_base) 883 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, 884 PCI_CAP_ID_EXP); 885 886 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); 887 val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 888 val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; 889 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); 890 891 val = dw_pcie_readl_dbi(pci, PCI_IO_BASE); 892 val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8); 893 dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); 894 895 val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE); 896 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE; 897 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE; 898 dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val); 899 900 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); 901 902 /* Enable as 0xFFFF0001 response for CRS */ 903 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT); 904 val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); 905 val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 << 906 AMBA_ERROR_RESPONSE_CRS_SHIFT); 907 dw_pcie_writel_dbi(pci, 
PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); 908 909 /* Configure Max lane width from DT */ 910 val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP); 911 val &= ~PCI_EXP_LNKCAP_MLW; 912 val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT); 913 dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); 914 915 /* Clear Slot Clock Configuration bit if SRNS configuration */ 916 if (pcie->enable_srns) { 917 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 918 PCI_EXP_LNKSTA); 919 val_16 &= ~PCI_EXP_LNKSTA_SLC; 920 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, 921 val_16); 922 } 923 924 config_gen3_gen4_eq_presets(pcie); 925 926 init_host_aspm(pcie); 927 928 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ 929 if (!pcie->supports_clkreq) { 930 disable_aspm_l11(pcie); 931 disable_aspm_l12(pcie); 932 } 933 934 if (!pcie->of_data->has_l1ss_exit_fix) { 935 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 936 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; 937 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 938 } 939 940 if (pcie->update_fc_fixup) { 941 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); 942 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; 943 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); 944 } 945 946 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); 947 948 return 0; 949 } 950 951 static int tegra_pcie_dw_start_link(struct dw_pcie *pci) 952 { 953 u32 val, offset, speed, tmp; 954 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 955 struct dw_pcie_rp *pp = &pci->pp; 956 bool retry = true; 957 958 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { 959 enable_irq(pcie->pex_rst_irq); 960 return 0; 961 } 962 963 retry_link: 964 /* Assert RST */ 965 val = appl_readl(pcie, APPL_PINMUX); 966 val &= ~APPL_PINMUX_PEX_RST; 967 appl_writel(pcie, val, APPL_PINMUX); 968 969 usleep_range(100, 200); 970 971 /* Enable LTSSM */ 972 val = appl_readl(pcie, APPL_CTRL); 973 val |= APPL_CTRL_LTSSM_EN; 
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * There are some endpoints which can't get the link up if
		 * root port has Data Link Feature (DLF) enabled.
		 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
		 * on Scaled Flow Control and DLF.
		 * So, need to confirm that is indeed the case here and attempt
		 * link up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		/* 0x11 is the LTSSM state checked for here; link must also be down */
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		/* Clear DLF exchange enable in config space, then retry link-up */
		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	/* Scale the core clock according to the negotiated link speed */
	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;

	if (speed >= ARRAY_SIZE(pcie_gen_freq))
		speed = 0;

	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

/* Report Data Link Layer Link Active from the Link Status register */
static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

/* Stop reacting to PERST# transitions (EP mode link management) */
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.host_init = tegra_pcie_dw_host_init,
};

/* Power off and exit all PHYs, in reverse order of bring-up */
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

/*
 * Init and power on all PHYs. On failure, unwind the PHYs already brought
 * up; note the phy_exit label deliberately sits inside the unwind loop so
 * that a PHY which passed phy_init() but failed phy_power_on() is exited
 * without being powered off.
 */
static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}

/*
 * Parse device-tree properties common to RC and EP modes; EP-only GPIO
 * entries are read at the end. Missing optional properties are logged at
 * info level and ignored; missing mandatory ones fail the probe.
 */
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	/* Second cell of "nvidia,bpmp" is the controller ID */
	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	/* RP using an external REFCLK is supported only in Tegra234 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
			pcie->enable_ext_refclk = true;
	} else {
		pcie->enable_ext_refclk =
			of_property_read_bool(pcie->dev->of_node,
					      "nvidia,enable-ext-refclk");
	}

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
		pcie->enable_srns =
			of_property_read_bool(np, "nvidia,enable-srns");

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev,
					     "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		/* Probe deferral is expected; log it quietly */
		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		/* REFCLK select GPIO is optional; continue without it */
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

/*
 * Ask BPMP firmware (via MRQ_UPHY) to enable or disable this PCIe
 * controller. Returns 0 on success or a negative error from the BPMP
 * transfer.
 */
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/*
	 * Controller-5 doesn't need to have its state set by BPMP-FW in
	 * Tegra194
	 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

/*
 * Ask BPMP firmware (via MRQ_UPHY) to init (enable=true) or turn off
 * (enable=false) the UPHY PLL for this endpoint controller.
 */
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

/* Force immediate downstream devices into D0 before link L2 entry */
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * link doesn't go into L2 state with some of the endpoints with Tegra
	 * if they are not in D0 state. So, need to make sure that immediate
	 * downstream devices are in D0 state before sending PME_TurnOff to put
	 * link into L2 state.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	/* The root port's secondary bus is the child of the root bus */
	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}

/* Both slot supplies are optional: -ENODEV leaves the pointer NULL */
static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}

/* Enable 3.3V then 12V slot supplies; wait T_PVPERL if either is present */
static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}

/* Disable slot supplies in reverse order of enabling */
static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}

/*
 * Power up and configure the controller for RP operation: BPMP controller
 * state, optional UPHY PLL, slot and PEX control supplies, core clock,
 * APB reset, optional HW hot-reset mode, and PHYs. Unwinds fully on error.
 */
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
			goto fail_pll_init;
		}
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (pcie->enable_srns || pcie->enable_ext_refclk) {
		/*
		 * When Tegra PCIe RP is using external clock, it cannot supply
		 * same clock to its downstream hierarchy. Hence, gate PCIe RP
		 * REFCLK out pads when RP & EP are using separate clocks or RP
		 * is using an external REFCLK.
		 */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	if (!pcie->supports_clkreq) {
		/* No CLKREQ routing: override the CLKREQ input state */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	if (pcie->enable_ext_refclk)
		tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}

/*
 * Undo tegra_pcie_config_controller(). Failures are only logged since
 * there is nothing useful callers can do about them during teardown.
 */
static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret)
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

	tegra_pcie_disable_slot_regulators(pcie);

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
		if (ret)
			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
	}

	ret =
	      tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
			pcie->cid, ret);
}

/* Configure the controller, then register the DWC host bridge */
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

/*
 * Request PME_Turn_Off and poll APPL_DEBUG until the link reports L2.
 * Returns 0 if the link was already down or reached L2, else a timeout
 * error from the poll.
 */
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
					 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
					 1, PME_ACK_TIMEOUT);
}

/* Put the link into L2 (or force detect state), then cut REFCLK to slot */
static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
		return;
	}

	/*
	 * PCIe controller exits from L2 only if reset is applied, so
	 * controller doesn't handle interrupts. But in cases where
	 * L2 entry fails, PERST# is asserted which can trigger surprise
	 * link down AER. However this function call happens in
	 * suspend_noirq(), so AER interrupt will not be processed.
	 * Disable all interrupts to avoid such a scenario.
	 */
	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * TX lane clock freq will reset to Gen1 only if link is in L2
		 * or detect state.
		 * So apply pex_rst to end point to force RP to go into detect
		 * state
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		/*
		 * Some cards do not go to detect state even after de-asserting
		 * PERST#. So, de-assert LTSSM to bring link to detect state.
		 */
		data = readl(pcie->appl_base + APPL_CTRL);
		data &= ~APPL_CTRL_LTSSM_EN;
		writel(data, pcie->appl_base + APPL_CTRL);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						APPL_DEBUG_LTSSM_STATE_MASK) >>
						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err)
			dev_info(pcie->dev, "Link didn't go to detect state\n");
	}
	/*
	 * DBI registers may not be accessible after this as PLL-E would be
	 * down depending on how CLKREQ is pulled by end point
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}

/* Full RC teardown: D0 transition, host deinit, L2 entry, power-off */
static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}

/* Probe-time RP setup: runtime PM, pinctrl, controller init, debugfs */
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
1652 if (ret < 0) { 1653 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", 1654 ret); 1655 goto fail_pm_get_sync; 1656 } 1657 1658 ret = pinctrl_pm_select_default_state(dev); 1659 if (ret < 0) { 1660 dev_err(dev, "Failed to configure sideband pins: %d\n", ret); 1661 goto fail_pm_get_sync; 1662 } 1663 1664 ret = tegra_pcie_init_controller(pcie); 1665 if (ret < 0) { 1666 dev_err(dev, "Failed to initialize controller: %d\n", ret); 1667 goto fail_pm_get_sync; 1668 } 1669 1670 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); 1671 if (!pcie->link_state) { 1672 ret = -ENOMEDIUM; 1673 goto fail_host_init; 1674 } 1675 1676 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); 1677 if (!name) { 1678 ret = -ENOMEM; 1679 goto fail_host_init; 1680 } 1681 1682 pcie->debugfs = debugfs_create_dir(name, NULL); 1683 init_debugfs(pcie); 1684 1685 return ret; 1686 1687 fail_host_init: 1688 tegra_pcie_deinit_controller(pcie); 1689 fail_pm_get_sync: 1690 pm_runtime_put_sync(dev); 1691 pm_runtime_disable(dev); 1692 return ret; 1693 } 1694 1695 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) 1696 { 1697 u32 val; 1698 int ret; 1699 1700 if (pcie->ep_state == EP_STATE_DISABLED) 1701 return; 1702 1703 /* Disable LTSSM */ 1704 val = appl_readl(pcie, APPL_CTRL); 1705 val &= ~APPL_CTRL_LTSSM_EN; 1706 appl_writel(pcie, val, APPL_CTRL); 1707 1708 ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, 1709 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> 1710 APPL_DEBUG_LTSSM_STATE_SHIFT) == 1711 LTSSM_STATE_PRE_DETECT, 1712 1, LTSSM_TIMEOUT); 1713 if (ret) 1714 dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); 1715 1716 reset_control_assert(pcie->core_rst); 1717 1718 tegra_pcie_disable_phy(pcie); 1719 1720 reset_control_assert(pcie->core_apb_rst); 1721 1722 clk_disable_unprepare(pcie->core_clk); 1723 1724 pm_runtime_put_sync(pcie->dev); 1725 1726 if (pcie->enable_ext_refclk) { 1727 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1728 if (ret) 
1729 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", 1730 ret); 1731 } 1732 1733 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1734 if (ret) 1735 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); 1736 1737 pcie->ep_state = EP_STATE_DISABLED; 1738 dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); 1739 } 1740 1741 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) 1742 { 1743 struct dw_pcie *pci = &pcie->pci; 1744 struct dw_pcie_ep *ep = &pci->ep; 1745 struct device *dev = pcie->dev; 1746 u32 val; 1747 int ret; 1748 u16 val_16; 1749 1750 if (pcie->ep_state == EP_STATE_ENABLED) 1751 return; 1752 1753 ret = pm_runtime_resume_and_get(dev); 1754 if (ret < 0) { 1755 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", 1756 ret); 1757 return; 1758 } 1759 1760 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); 1761 if (ret) { 1762 dev_err(pcie->dev, "Failed to enable controller %u: %d\n", 1763 pcie->cid, ret); 1764 goto fail_set_ctrl_state; 1765 } 1766 1767 if (pcie->enable_ext_refclk) { 1768 ret = tegra_pcie_bpmp_set_pll_state(pcie, true); 1769 if (ret) { 1770 dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", 1771 ret); 1772 goto fail_pll_init; 1773 } 1774 } 1775 1776 ret = clk_prepare_enable(pcie->core_clk); 1777 if (ret) { 1778 dev_err(dev, "Failed to enable core clock: %d\n", ret); 1779 goto fail_core_clk_enable; 1780 } 1781 1782 ret = reset_control_deassert(pcie->core_apb_rst); 1783 if (ret) { 1784 dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); 1785 goto fail_core_apb_rst; 1786 } 1787 1788 ret = tegra_pcie_enable_phy(pcie); 1789 if (ret) { 1790 dev_err(dev, "Failed to enable PHY: %d\n", ret); 1791 goto fail_phy; 1792 } 1793 1794 /* Clear any stale interrupt statuses */ 1795 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); 1796 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); 1797 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); 1798 appl_writel(pcie, 0xFFFFFFFF, 
		    APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	/*
	 * NOTE(review): unlike RP mode, the CLK_OUTPUT override bit is set
	 * here rather than cleared — presumably the REFCLK pad direction
	 * differs in EP mode; confirm against the APPL_PINMUX register spec.
	 */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	/* Update CFG and iATU_DMA base addresses */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	/* Enable link-state and PCI-command (BME) change interrupts */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Set Max_Payload_Size to 256 bytes in Device Control */
	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	/* Point the MSI-X address-match window at the EP's MSI memory */
	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Program the private control to allow sending LTR upstream */
	if (pcie->of_data->has_ltr_req_fix) {
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);
	}

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
fail_set_ctrl_state:
	pm_runtime_put_sync(dev);
}

/* Dispatch PERST# level changes to the assert/de-assert handlers */
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}

/* Pulse the legacy INTx assertion for 1-2 ms */
static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}

/* Trigger an MSI via APPL_MSI_CTRL_1; one bit per vector, so max 31 */
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}

/* Trigger an MSI-X by writing the vector into the address-match window */
static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}

/* dw_pcie_ep_ops.raise_irq: route to the per-type trigger helpers */
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}

	return 0;
}

static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};

/* Probe-time EP setup: EP ops, page size, PERST# GPIO debounce and IRQ */
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST
GPIO debounce time: %d\n", 2047 ret); 2048 return ret; 2049 } 2050 2051 ret = gpiod_to_irq(pcie->pex_rst_gpiod); 2052 if (ret < 0) { 2053 dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret); 2054 return ret; 2055 } 2056 pcie->pex_rst_irq = (unsigned int)ret; 2057 2058 name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq", 2059 pcie->cid); 2060 if (!name) { 2061 dev_err(dev, "Failed to create PERST IRQ string\n"); 2062 return -ENOMEM; 2063 } 2064 2065 irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN); 2066 2067 pcie->ep_state = EP_STATE_DISABLED; 2068 2069 ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL, 2070 tegra_pcie_ep_pex_rst_irq, 2071 IRQF_TRIGGER_RISING | 2072 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2073 name, (void *)pcie); 2074 if (ret < 0) { 2075 dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret); 2076 return ret; 2077 } 2078 2079 pm_runtime_enable(dev); 2080 2081 ret = dw_pcie_ep_init(ep); 2082 if (ret) { 2083 dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", 2084 ret); 2085 pm_runtime_disable(dev); 2086 return ret; 2087 } 2088 2089 return 0; 2090 } 2091 2092 static int tegra_pcie_dw_probe(struct platform_device *pdev) 2093 { 2094 const struct tegra_pcie_dw_of_data *data; 2095 struct device *dev = &pdev->dev; 2096 struct resource *atu_dma_res; 2097 struct tegra_pcie_dw *pcie; 2098 struct dw_pcie_rp *pp; 2099 struct dw_pcie *pci; 2100 struct phy **phys; 2101 char *name; 2102 int ret; 2103 u32 i; 2104 2105 data = of_device_get_match_data(dev); 2106 2107 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 2108 if (!pcie) 2109 return -ENOMEM; 2110 2111 pci = &pcie->pci; 2112 pci->dev = &pdev->dev; 2113 pci->ops = &tegra_dw_pcie_ops; 2114 pcie->dev = &pdev->dev; 2115 pcie->of_data = (struct tegra_pcie_dw_of_data *)data; 2116 pci->n_fts[0] = pcie->of_data->n_fts[0]; 2117 pci->n_fts[1] = pcie->of_data->n_fts[1]; 2118 pp = &pci->pp; 2119 pp->num_vectors = MAX_MSI_IRQS; 2120 2121 ret = 
tegra_pcie_dw_parse_dt(pcie); 2122 if (ret < 0) { 2123 const char *level = KERN_ERR; 2124 2125 if (ret == -EPROBE_DEFER) 2126 level = KERN_DEBUG; 2127 2128 dev_printk(level, dev, 2129 dev_fmt("Failed to parse device tree: %d\n"), 2130 ret); 2131 return ret; 2132 } 2133 2134 ret = tegra_pcie_get_slot_regulators(pcie); 2135 if (ret < 0) { 2136 const char *level = KERN_ERR; 2137 2138 if (ret == -EPROBE_DEFER) 2139 level = KERN_DEBUG; 2140 2141 dev_printk(level, dev, 2142 dev_fmt("Failed to get slot regulators: %d\n"), 2143 ret); 2144 return ret; 2145 } 2146 2147 if (pcie->pex_refclk_sel_gpiod) 2148 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1); 2149 2150 pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl"); 2151 if (IS_ERR(pcie->pex_ctl_supply)) { 2152 ret = PTR_ERR(pcie->pex_ctl_supply); 2153 if (ret != -EPROBE_DEFER) 2154 dev_err(dev, "Failed to get regulator: %ld\n", 2155 PTR_ERR(pcie->pex_ctl_supply)); 2156 return ret; 2157 } 2158 2159 pcie->core_clk = devm_clk_get(dev, "core"); 2160 if (IS_ERR(pcie->core_clk)) { 2161 dev_err(dev, "Failed to get core clock: %ld\n", 2162 PTR_ERR(pcie->core_clk)); 2163 return PTR_ERR(pcie->core_clk); 2164 } 2165 2166 pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2167 "appl"); 2168 if (!pcie->appl_res) { 2169 dev_err(dev, "Failed to find \"appl\" region\n"); 2170 return -ENODEV; 2171 } 2172 2173 pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res); 2174 if (IS_ERR(pcie->appl_base)) 2175 return PTR_ERR(pcie->appl_base); 2176 2177 pcie->core_apb_rst = devm_reset_control_get(dev, "apb"); 2178 if (IS_ERR(pcie->core_apb_rst)) { 2179 dev_err(dev, "Failed to get APB reset: %ld\n", 2180 PTR_ERR(pcie->core_apb_rst)); 2181 return PTR_ERR(pcie->core_apb_rst); 2182 } 2183 2184 phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL); 2185 if (!phys) 2186 return -ENOMEM; 2187 2188 for (i = 0; i < pcie->phy_count; i++) { 2189 name = kasprintf(GFP_KERNEL, "p2u-%u", i); 2190 if (!name) { 2191 
dev_err(dev, "Failed to create P2U string\n"); 2192 return -ENOMEM; 2193 } 2194 phys[i] = devm_phy_get(dev, name); 2195 kfree(name); 2196 if (IS_ERR(phys[i])) { 2197 ret = PTR_ERR(phys[i]); 2198 if (ret != -EPROBE_DEFER) 2199 dev_err(dev, "Failed to get PHY: %d\n", ret); 2200 return ret; 2201 } 2202 } 2203 2204 pcie->phys = phys; 2205 2206 atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2207 "atu_dma"); 2208 if (!atu_dma_res) { 2209 dev_err(dev, "Failed to find \"atu_dma\" region\n"); 2210 return -ENODEV; 2211 } 2212 pcie->atu_dma_res = atu_dma_res; 2213 2214 pci->atu_size = resource_size(atu_dma_res); 2215 pci->atu_base = devm_ioremap_resource(dev, atu_dma_res); 2216 if (IS_ERR(pci->atu_base)) 2217 return PTR_ERR(pci->atu_base); 2218 2219 pcie->core_rst = devm_reset_control_get(dev, "core"); 2220 if (IS_ERR(pcie->core_rst)) { 2221 dev_err(dev, "Failed to get core reset: %ld\n", 2222 PTR_ERR(pcie->core_rst)); 2223 return PTR_ERR(pcie->core_rst); 2224 } 2225 2226 pp->irq = platform_get_irq_byname(pdev, "intr"); 2227 if (pp->irq < 0) 2228 return pp->irq; 2229 2230 pcie->bpmp = tegra_bpmp_get(dev); 2231 if (IS_ERR(pcie->bpmp)) 2232 return PTR_ERR(pcie->bpmp); 2233 2234 platform_set_drvdata(pdev, pcie); 2235 2236 switch (pcie->of_data->mode) { 2237 case DW_PCIE_RC_TYPE: 2238 ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, 2239 IRQF_SHARED, "tegra-pcie-intr", pcie); 2240 if (ret) { 2241 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, 2242 ret); 2243 goto fail; 2244 } 2245 2246 ret = tegra_pcie_config_rp(pcie); 2247 if (ret && ret != -ENOMEDIUM) 2248 goto fail; 2249 else 2250 return 0; 2251 break; 2252 2253 case DW_PCIE_EP_TYPE: 2254 ret = devm_request_threaded_irq(dev, pp->irq, 2255 tegra_pcie_ep_hard_irq, 2256 tegra_pcie_ep_irq_thread, 2257 IRQF_SHARED | IRQF_ONESHOT, 2258 "tegra-pcie-ep-intr", pcie); 2259 if (ret) { 2260 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, 2261 ret); 2262 goto fail; 2263 } 2264 2265 ret = 
tegra_pcie_config_ep(pcie, pdev); 2266 if (ret < 0) 2267 goto fail; 2268 break; 2269 2270 default: 2271 dev_err(dev, "Invalid PCIe device type %d\n", 2272 pcie->of_data->mode); 2273 } 2274 2275 fail: 2276 tegra_bpmp_put(pcie->bpmp); 2277 return ret; 2278 } 2279 2280 static int tegra_pcie_dw_remove(struct platform_device *pdev) 2281 { 2282 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); 2283 2284 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { 2285 if (!pcie->link_state) 2286 return 0; 2287 2288 debugfs_remove_recursive(pcie->debugfs); 2289 tegra_pcie_deinit_controller(pcie); 2290 pm_runtime_put_sync(pcie->dev); 2291 } else { 2292 disable_irq(pcie->pex_rst_irq); 2293 pex_ep_event_pex_rst_assert(pcie); 2294 } 2295 2296 pm_runtime_disable(pcie->dev); 2297 tegra_bpmp_put(pcie->bpmp); 2298 if (pcie->pex_refclk_sel_gpiod) 2299 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0); 2300 2301 return 0; 2302 } 2303 2304 static int tegra_pcie_dw_suspend_late(struct device *dev) 2305 { 2306 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2307 u32 val; 2308 2309 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { 2310 dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n"); 2311 return -EPERM; 2312 } 2313 2314 if (!pcie->link_state) 2315 return 0; 2316 2317 /* Enable HW_HOT_RST mode */ 2318 if (!pcie->of_data->has_sbr_reset_fix) { 2319 val = appl_readl(pcie, APPL_CTRL); 2320 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << 2321 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 2322 val |= APPL_CTRL_HW_HOT_RST_EN; 2323 appl_writel(pcie, val, APPL_CTRL); 2324 } 2325 2326 return 0; 2327 } 2328 2329 static int tegra_pcie_dw_suspend_noirq(struct device *dev) 2330 { 2331 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2332 2333 if (!pcie->link_state) 2334 return 0; 2335 2336 tegra_pcie_downstream_dev_to_D0(pcie); 2337 tegra_pcie_dw_pme_turnoff(pcie); 2338 tegra_pcie_unconfig_controller(pcie); 2339 2340 return 0; 2341 } 2342 2343 static int tegra_pcie_dw_resume_noirq(struct device *dev) 2344 { 
2345 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2346 int ret; 2347 2348 if (!pcie->link_state) 2349 return 0; 2350 2351 ret = tegra_pcie_config_controller(pcie, true); 2352 if (ret < 0) 2353 return ret; 2354 2355 ret = tegra_pcie_dw_host_init(&pcie->pci.pp); 2356 if (ret < 0) { 2357 dev_err(dev, "Failed to init host: %d\n", ret); 2358 goto fail_host_init; 2359 } 2360 2361 dw_pcie_setup_rc(&pcie->pci.pp); 2362 2363 ret = tegra_pcie_dw_start_link(&pcie->pci); 2364 if (ret < 0) 2365 goto fail_host_init; 2366 2367 return 0; 2368 2369 fail_host_init: 2370 tegra_pcie_unconfig_controller(pcie); 2371 return ret; 2372 } 2373 2374 static int tegra_pcie_dw_resume_early(struct device *dev) 2375 { 2376 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); 2377 u32 val; 2378 2379 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { 2380 dev_err(dev, "Suspend is not supported in EP mode"); 2381 return -ENOTSUPP; 2382 } 2383 2384 if (!pcie->link_state) 2385 return 0; 2386 2387 /* Disable HW_HOT_RST mode */ 2388 if (!pcie->of_data->has_sbr_reset_fix) { 2389 val = appl_readl(pcie, APPL_CTRL); 2390 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << 2391 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 2392 val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << 2393 APPL_CTRL_HW_HOT_RST_MODE_SHIFT; 2394 val &= ~APPL_CTRL_HW_HOT_RST_EN; 2395 appl_writel(pcie, val, APPL_CTRL); 2396 } 2397 2398 return 0; 2399 } 2400 2401 static void tegra_pcie_dw_shutdown(struct platform_device *pdev) 2402 { 2403 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); 2404 2405 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { 2406 if (!pcie->link_state) 2407 return; 2408 2409 debugfs_remove_recursive(pcie->debugfs); 2410 tegra_pcie_downstream_dev_to_D0(pcie); 2411 2412 disable_irq(pcie->pci.pp.irq); 2413 if (IS_ENABLED(CONFIG_PCI_MSI)) 2414 disable_irq(pcie->pci.pp.msi_irq[0]); 2415 2416 tegra_pcie_dw_pme_turnoff(pcie); 2417 tegra_pcie_unconfig_controller(pcie); 2418 pm_runtime_put_sync(pcie->dev); 2419 } else { 2420 
disable_irq(pcie->pex_rst_irq); 2421 pex_ep_event_pex_rst_assert(pcie); 2422 } 2423 } 2424 2425 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = { 2426 .version = TEGRA194_DWC_IP_VER, 2427 .mode = DW_PCIE_RC_TYPE, 2428 .cdm_chk_int_en_bit = BIT(19), 2429 /* Gen4 - 5, 6, 8 and 9 presets enabled */ 2430 .gen4_preset_vec = 0x360, 2431 .n_fts = { 52, 52 }, 2432 }; 2433 2434 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = { 2435 .version = TEGRA194_DWC_IP_VER, 2436 .mode = DW_PCIE_EP_TYPE, 2437 .cdm_chk_int_en_bit = BIT(19), 2438 /* Gen4 - 5, 6, 8 and 9 presets enabled */ 2439 .gen4_preset_vec = 0x360, 2440 .n_fts = { 52, 52 }, 2441 }; 2442 2443 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = { 2444 .version = TEGRA234_DWC_IP_VER, 2445 .mode = DW_PCIE_RC_TYPE, 2446 .has_msix_doorbell_access_fix = true, 2447 .has_sbr_reset_fix = true, 2448 .has_l1ss_exit_fix = true, 2449 .cdm_chk_int_en_bit = BIT(18), 2450 /* Gen4 - 6, 8 and 9 presets enabled */ 2451 .gen4_preset_vec = 0x340, 2452 .n_fts = { 52, 80 }, 2453 }; 2454 2455 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = { 2456 .version = TEGRA234_DWC_IP_VER, 2457 .mode = DW_PCIE_EP_TYPE, 2458 .has_l1ss_exit_fix = true, 2459 .has_ltr_req_fix = true, 2460 .cdm_chk_int_en_bit = BIT(18), 2461 /* Gen4 - 6, 8 and 9 presets enabled */ 2462 .gen4_preset_vec = 0x340, 2463 .n_fts = { 52, 80 }, 2464 }; 2465 2466 static const struct of_device_id tegra_pcie_dw_of_match[] = { 2467 { 2468 .compatible = "nvidia,tegra194-pcie", 2469 .data = &tegra194_pcie_dw_rc_of_data, 2470 }, 2471 { 2472 .compatible = "nvidia,tegra194-pcie-ep", 2473 .data = &tegra194_pcie_dw_ep_of_data, 2474 }, 2475 { 2476 .compatible = "nvidia,tegra234-pcie", 2477 .data = &tegra234_pcie_dw_rc_of_data, 2478 }, 2479 { 2480 .compatible = "nvidia,tegra234-pcie-ep", 2481 .data = &tegra234_pcie_dw_ep_of_data, 2482 }, 2483 {} 2484 }; 2485 2486 static const struct dev_pm_ops 
tegra_pcie_dw_pm_ops = { 2487 .suspend_late = tegra_pcie_dw_suspend_late, 2488 .suspend_noirq = tegra_pcie_dw_suspend_noirq, 2489 .resume_noirq = tegra_pcie_dw_resume_noirq, 2490 .resume_early = tegra_pcie_dw_resume_early, 2491 }; 2492 2493 static struct platform_driver tegra_pcie_dw_driver = { 2494 .probe = tegra_pcie_dw_probe, 2495 .remove = tegra_pcie_dw_remove, 2496 .shutdown = tegra_pcie_dw_shutdown, 2497 .driver = { 2498 .name = "tegra194-pcie", 2499 .pm = &tegra_pcie_dw_pm_ops, 2500 .of_match_table = tegra_pcie_dw_of_match, 2501 }, 2502 }; 2503 module_platform_driver(tegra_pcie_dw_driver); 2504 2505 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); 2506 2507 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); 2508 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); 2509 MODULE_LICENSE("GPL v2"); 2510