// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra194 SoC
 *
 * Copyright (C) 2019 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE				BIT(0)
#define IO_BASE_IO_DECODE_BIT8				BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)

#define N_FTS_VAL					52
#define FTS_VAL						52

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define GEN3_RELATED_OFF			0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL			0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1

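/* Core clock rates, indexed by the negotiated link speed (CLS) minus one */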
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};

struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool link_state;
	bool update_fc_fixup;
	u8 init_link_width;
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u32 ras_des_cap;
	u32 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
};

struct tegra_pcie_dw_of_data {
	enum dw_pcie_device_mode mode;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

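/*
 * Accessors for the Tegra-specific application ("APPL") logic registers.
 * The relaxed MMIO variants are used here, presumably because this register
 * space does not require ordering against DMA accesses.
 */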
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};

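/*
 * If the hardware reports Link Bandwidth Management Status and the negotiated
 * link width has dropped below the width seen at boot, force the target link
 * speed back to 2.5 GT/s and retrain the link.
 */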
static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE: Since this scenario is uncommon and the link is not stable
	 * anyway, don't wait to confirm whether the link really transitions
	 * to Gen-2 speed.
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

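/*
 * Root port IRQ handler: interrupt causes are latched hierarchically, so a
 * level-0 status read (APPL_INTR_STATUS_L0) is followed by a read of the
 * matching level-1 status register to identify and acknowledge the exact
 * event.
 */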
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* Workaround (WAR) for SBR and surprise link down */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}

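/*
 * Hot reset from the host has completed: clear every latched level-1
 * interrupt status and re-enable the LTSSM so that link training can
 * restart.
 */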
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed;

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

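		/*
		 * LTR message layout: latency value in bits [9:0], scale in
		 * bits [12:10] (scale 2 == 1024 ns units, so 110 * 1024 ns is
		 * roughly 110 us) and the requirement bit in bit 15. The same
		 * encoding is replicated into the no-snoop half of the
		 * register (bits [31:16]).
		 */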
		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Spurious interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in root port mode. The
	 * system hangs when it is accessed while the link is in the ASPM-L1
	 * state, so skip accessing it altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	/*
	 * This is an endpoint-mode-specific register that happens to appear
	 * even when the controller is operating in root port mode. The
	 * system hangs when it is accessed while the link is in the ASPM-L1
	 * state, so skip accessing it altogether.
	 */
	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

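/*
 * Select one of the group-5 RAS DES events in the event counter control
 * register, enable counting and return the current value of the event
 * counter data register for that event.
 */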
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

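/*
 * Program an initial lane equalization preset (preset 5) for both the 8 GT/s
 * (SPCIE capability) and 16 GT/s (Physical Layer 16.0 GT/s capability) data
 * rates on every lane, then set up the Gen3/Gen4 equalization control
 * registers via the rate shadow select mechanism.
 */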
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_16;

	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

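	/* Set Max Payload Size (MPS) to 256 bytes */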
	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable 0xFFFF0001 as the response for CRS completions */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	u32 val, offset, speed, tmp;
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	struct pcie_port *pp = &pci->pp;
	bool retry = true;

	if (pcie->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

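	/*
	 * Link bring-up sequence: assert PERST#, enable the LTSSM, then
	 * de-assert PERST# and allow 100 ms for link training.
	 */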
retry_link:
	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * There are some endpoints that can't get the link up if the
		 * root port has the Data Link Feature (DLF) enabled.
		 * Refer to PCIe spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for
		 * more information on Scaled Flow Control and DLF.
		 * So, confirm that this is indeed the case here and attempt
		 * link-up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for some other, legitimate reason */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL\n");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.host_init = tegra_pcie_dw_host_init,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

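	/*
	 * Unwind: entering at phy_power_off powers off and exits every PHY
	 * before the one that failed; entering at phy_exit additionally
	 * exits the PHY whose phy_power_on() failed.
	 */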
phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}

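/*
 * Ask the BPMP firmware, via an MRQ_UPHY request, to enable or disable the
 * given PCIe controller.
 */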
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/* Controller-5 doesn't need to have its state set by BPMP-FW */
	if (pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * With some endpoints, the link doesn't go into the L2 state if they
	 * are not in the D0 state. So, make sure that the immediate
	 * downstream devices are in D0 before sending PME_Turn_Off to put the
	 * link into L2. This is as per PCI Express Base r4.0 v1.0
	 * September 27-2017, 5.2 Link State Power Management (Page #428).
	 */

	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}

static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}

static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}

static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}

static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}

static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret)
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

	tegra_pcie_disable_slot_regulators(pcie);

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
			pcie->cid, ret);
}

static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

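/*
 * Request a PME_Turn_Off broadcast and poll APPL_DEBUG until the link
 * settles in L2. Returns 0 if the link is already down or L2 entry
 * succeeded, and a negative errno on timeout.
 */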
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
				 1, PME_ACK_TIMEOUT);
}

static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up\n");
		return;
	}

	/*
	 * The PCIe controller exits from L2 only if a reset is applied, so
	 * the controller doesn't handle interrupts there. But in cases where
	 * L2 entry fails, PERST# is asserted, which can trigger a surprise
	 * link down AER. However, since this function is called from
	 * suspend_noirq(), that AER interrupt will never be processed.
	 * Disable all interrupts to avoid such a scenario.
	 */
	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * The TX lane clock frequency resets to Gen1 only if the link
		 * is in the L2 or Detect state. So, assert PERST# to the
		 * endpoint to force the root port into the Detect state.
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		/*
		 * Some cards do not go to the Detect state even after PERST#
		 * assertion. So, disable the LTSSM to bring the link to the
		 * Detect state.
		 */
		data = readl(pcie->appl_base + APPL_CTRL);
		data &= ~APPL_CTRL_LTSSM_EN;
		writel(data, pcie->appl_base + APPL_CTRL);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						APPL_DEBUG_LTSSM_STATE_MASK) >>
						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err)
			dev_info(pcie->dev, "Link didn't go to detect state\n");
	}
	/*
	 * DBI registers may not be accessible after this, as PLL-E may go
	 * down depending on how CLKREQ is pulled by the endpoint.
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}

static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}

static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize controller: %d\n", ret);
		goto fail_pm_get_sync;
	}

	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				 LTSSM_STATE_PRE_DETECT,
				 1, LTSSM_TIMEOUT);
	if (ret)
		dev_err(pcie->dev, "Failed to go to Detect state: %d\n", ret);

	reset_control_assert(pcie->core_rst);

	tegra_pcie_disable_phy(pcie);

	reset_control_assert(pcie->core_apb_rst);

	clk_disable_unprepare(pcie->core_clk);

	pm_runtime_put_sync(pcie->dev);

	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);

	pcie->ep_state = EP_STATE_DISABLED;
	dev_dbg(pcie->dev, "Deinitialization of the endpoint is complete\n");
}

static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;
	u16 val_16;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
	if (ret) {
		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
		goto fail_pll_init;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* Configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

1767 	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
1768 	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
1769 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
1770 	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
1771 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
1772 
1773 	ret = dw_pcie_ep_init_complete(ep);
1774 	if (ret) {
1775 		dev_err(dev, "Failed to complete initialization: %d\n", ret);
1776 		goto fail_init_complete;
1777 	}
1778 
1779 	dw_pcie_ep_init_notify(ep);
1780 
1781 	/* Enable LTSSM */
1782 	val = appl_readl(pcie, APPL_CTRL);
1783 	val |= APPL_CTRL_LTSSM_EN;
1784 	appl_writel(pcie, val, APPL_CTRL);
1785 
1786 	pcie->ep_state = EP_STATE_ENABLED;
1787 	dev_dbg(dev, "Initialization of endpoint is completed\n");
1788 
1789 	return;
1790 
1791 fail_init_complete:
1792 	reset_control_assert(pcie->core_rst);
1793 	tegra_pcie_disable_phy(pcie);
1794 fail_phy:
1795 	reset_control_assert(pcie->core_apb_rst);
1796 fail_core_apb_rst:
1797 	clk_disable_unprepare(pcie->core_clk);
1798 fail_core_clk_enable:
1799 	tegra_pcie_bpmp_set_pll_state(pcie, false);
1800 fail_pll_init:
1801 	pm_runtime_put_sync(dev);
1802 }
1803 
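/*
 * Threaded PERST# IRQ handler: the GPIO level indicates whether the host
 * asserted or deasserted PERST#, and the endpoint is torn down or brought
 * up accordingly.
 */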
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}

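/*
 * Raise a legacy (INTx) interrupt by pulsing APPL_LEGACY_INTX. Tegra194
 * supports only INTA, so any other INTx number is rejected.
 */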
static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}

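/*
 * Raise an MSI towards the host: writing BIT(irq) to APPL_MSI_CTRL_1
 * triggers the corresponding vector; vector numbers above 31 are rejected.
 */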
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}

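/*
 * Raise an MSI-X interrupt by writing the vector number into the msi_mem
 * region; the address-match logic programmed in
 * pex_ep_event_pex_rst_deassert() turns this write into an MSI-X message.
 */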
static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}

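/* Dispatch an EPC IRQ request to the legacy/MSI/MSI-X helper above. */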
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}
}

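/*
 * BAR0 is fixed to 1 MiB and 64-bit; BARs 2-5 are marked reserved and are
 * therefore not available to endpoint function drivers.
 */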
static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};

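/*
 * One-time endpoint setup: debounce the PERST# GPIO, request its threaded
 * IRQ (left disabled via IRQ_NOAUTOEN until the endpoint is started) and
 * register with the DesignWare endpoint core.
 */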
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}

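/*
 * Probe: acquire the resources common to both modes (clocks, resets,
 * regulators, "appl" and "atu_dma" regions, P2U PHYs, BPMP handle), then
 * branch into root port or endpoint specific configuration based on the
 * mode selected by the compatible string.
 */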
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
	struct pcie_port *pp;
	struct dw_pcie *pci;
	struct phy **phys;
	char *name;
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = &pdev->dev;
	pci->ops = &tegra_dw_pcie_ops;
	pci->n_fts[0] = N_FTS_VAL;
	pci->n_fts[1] = FTS_VAL;
	pci->version = 0x490A;

	pp = &pci->pp;
	pp->num_vectors = MAX_MSI_IRQS;
	pcie->dev = &pdev->dev;
	pcie->mode = (enum dw_pcie_device_mode)data->mode;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get regulator: %ld\n",
				PTR_ERR(pcie->pex_ctl_supply));
		return ret;
	}

	pcie->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(pcie->core_clk)) {
		dev_err(dev, "Failed to get core clock: %ld\n",
			PTR_ERR(pcie->core_clk));
		return PTR_ERR(pcie->core_clk);
	}

	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "appl");
	if (!pcie->appl_res) {
		dev_err(dev, "Failed to find \"appl\" region\n");
		return -ENODEV;
	}

	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
	if (IS_ERR(pcie->appl_base))
		return PTR_ERR(pcie->appl_base);

	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(pcie->core_apb_rst)) {
		dev_err(dev, "Failed to get APB reset: %ld\n",
			PTR_ERR(pcie->core_apb_rst));
		return PTR_ERR(pcie->core_apb_rst);
	}

	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	for (i = 0; i < pcie->phy_count; i++) {
		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
		if (!name) {
			dev_err(dev, "Failed to create P2U string\n");
			return -ENOMEM;
		}
		phys[i] = devm_phy_get(dev, name);
		kfree(name);
		if (IS_ERR(phys[i])) {
			ret = PTR_ERR(phys[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get PHY: %d\n", ret);
			return ret;
		}
	}

	pcie->phys = phys;

	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "atu_dma");
	if (!atu_dma_res) {
		dev_err(dev, "Failed to find \"atu_dma\" region\n");
		return -ENODEV;
	}
	pcie->atu_dma_res = atu_dma_res;

	pci->atu_size = resource_size(atu_dma_res);
	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);

	pcie->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(pcie->core_rst)) {
		dev_err(dev, "Failed to get core reset: %ld\n",
			PTR_ERR(pcie->core_rst));
		return PTR_ERR(pcie->core_rst);
	}

	pp->irq = platform_get_irq_byname(pdev, "intr");
	if (pp->irq < 0)
		return pp->irq;

	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

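	/* IRQ handlers and final configuration differ between RC and EP modes */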
	switch (pcie->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		/* A missing link (-ENOMEDIUM) is not a probe failure */
		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;

		return 0;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;

		return 0;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
		ret = -EINVAL;
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
	return ret;
}

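/*
 * In RC mode, tear the controller down only if the link ever came up; in
 * EP mode, quiesce by disabling the PERST# IRQ and running the PERST#
 * assert path by hand.
 */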
static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return 0;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_deinit_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}

	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);

	return 0;
}

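/*
 * Late suspend hook: suspend is not supported in EP mode. In RC mode, hand
 * hot-reset handling over to hardware (APPL_CTRL_HW_HOT_RST_EN) for the
 * duration of suspend; tegra_pcie_dw_resume_early() reverses this.
 */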
static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Failed to suspend: Tegra PCIe is in EP mode\n");
		return -EPERM;
	}

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	return 0;
}

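/*
 * Noirq suspend: bring downstream devices back to D0, send PME_Turn_Off to
 * put the link into the L2 state and power the controller down.
 */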
static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (!pcie->link_state)
		return 0;

	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);

	return 0;
}

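/*
 * Noirq resume: the reverse of tegra_pcie_dw_suspend_noirq(). Power the
 * controller back up, reinitialize the host bridge and restart the link.
 */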
static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	dw_pcie_setup_rc(&pcie->pci.pp);

	ret = tegra_pcie_dw_start_link(&pcie->pci);
	if (ret < 0)
		goto fail_host_init;

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

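/*
 * Early resume hook: undo tegra_pcie_dw_suspend_late() by selecting the
 * immediate hot-reset mode and disabling hardware-controlled hot reset.
 */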
static int tegra_pcie_dw_resume_early(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Suspend is not supported in EP mode\n");
		return -ENOTSUPP;
	}

	if (!pcie->link_state)
		return 0;

	/* Disable HW_HOT_RST mode */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
	val &= ~APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	return 0;
}

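/*
 * Shutdown mirrors remove: quiesce interrupts, move downstream devices to
 * D0 and turn the link off so the hardware is left in a sane state.
 */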
static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_downstream_dev_to_D0(pcie);

		disable_irq(pcie->pci.pp.irq);
		if (IS_ENABLED(CONFIG_PCI_MSI))
			disable_irq(pcie->pci.pp.msi_irq);

		tegra_pcie_dw_pme_turnoff(pcie);
		tegra_pcie_unconfig_controller(pcie);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}
}

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra_pcie_dw_ep_of_data,
	},
	{},
};

static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};

static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name	= "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL");