1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * PCIe host controller driver for Tegra194 SoC
4  *
5  * Copyright (C) 2019 NVIDIA Corporation.
6  *
7  * Author: Vidya Sagar <vidyas@nvidia.com>
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/debugfs.h>
12 #include <linux/delay.h>
13 #include <linux/gpio.h>
14 #include <linux/gpio/consumer.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_pci.h>
24 #include <linux/pci.h>
25 #include <linux/phy/phy.h>
26 #include <linux/pinctrl/consumer.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/random.h>
30 #include <linux/reset.h>
31 #include <linux/resource.h>
32 #include <linux/types.h>
33 #include "pcie-designware.h"
34 #include <soc/tegra/bpmp.h>
35 #include <soc/tegra/bpmp-abi.h>
36 #include "../../pci.h"
37 
38 #define APPL_PINMUX				0x0
39 #define APPL_PINMUX_PEX_RST			BIT(0)
40 #define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
41 #define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
42 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
43 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)
44 
45 #define APPL_CTRL				0x4
46 #define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
47 #define APPL_CTRL_LTSSM_EN			BIT(7)
48 #define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
49 #define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
50 #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
51 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
52 
53 #define APPL_INTR_EN_L0_0			0x8
54 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
55 #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
56 #define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
57 #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
58 #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
59 #define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
60 #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)
61 
62 #define APPL_INTR_STATUS_L0			0xC
63 #define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
64 #define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
65 #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
66 #define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
67 #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)
68 
69 #define APPL_INTR_EN_L1_0_0				0x1C
70 #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
71 #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
72 #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)
73 
74 #define APPL_INTR_STATUS_L1_0_0				0x20
75 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
76 #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
77 #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)
78 
79 #define APPL_INTR_STATUS_L1_1			0x2C
80 #define APPL_INTR_STATUS_L1_2			0x30
81 #define APPL_INTR_STATUS_L1_3			0x34
82 #define APPL_INTR_STATUS_L1_6			0x3C
83 #define APPL_INTR_STATUS_L1_7			0x40
84 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)
85 
86 #define APPL_INTR_EN_L1_8_0			0x44
87 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
88 #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
89 #define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
90 #define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)
91 
92 #define APPL_INTR_STATUS_L1_8_0			0x4C
93 #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
94 #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
95 #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)
96 
97 #define APPL_INTR_STATUS_L1_9			0x54
98 #define APPL_INTR_STATUS_L1_10			0x58
99 #define APPL_INTR_STATUS_L1_11			0x64
100 #define APPL_INTR_STATUS_L1_13			0x74
101 #define APPL_INTR_STATUS_L1_14			0x78
102 #define APPL_INTR_STATUS_L1_15			0x7C
103 #define APPL_INTR_STATUS_L1_17			0x88
104 
105 #define APPL_INTR_EN_L1_18				0x90
106 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
107 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
108 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
109 
110 #define APPL_INTR_STATUS_L1_18				0x94
111 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
112 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
113 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
114 
115 #define APPL_MSI_CTRL_1				0xAC
116 
117 #define APPL_MSI_CTRL_2				0xB0
118 
119 #define APPL_LEGACY_INTX			0xB8
120 
121 #define APPL_LTR_MSG_1				0xC4
122 #define LTR_MSG_REQ				BIT(15)
123 #define LTR_MST_NO_SNOOP_SHIFT			16
124 
125 #define APPL_LTR_MSG_2				0xC8
126 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)
127 
128 #define APPL_LINK_STATUS			0xCC
129 #define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)
130 
131 #define APPL_DEBUG				0xD0
132 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
133 #define APPL_DEBUG_PM_LINKST_IN_L0		0x11
134 #define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
135 #define APPL_DEBUG_LTSSM_STATE_SHIFT		3
136 #define LTSSM_STATE_PRE_DETECT			5
137 
138 #define APPL_RADM_STATUS			0xE4
139 #define APPL_PM_XMT_TURNOFF_STATE		BIT(0)
140 
141 #define APPL_DM_TYPE				0x100
142 #define APPL_DM_TYPE_MASK			GENMASK(3, 0)
143 #define APPL_DM_TYPE_RP				0x4
144 #define APPL_DM_TYPE_EP				0x0
145 
146 #define APPL_CFG_BASE_ADDR			0x104
147 #define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)
148 
149 #define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
150 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)
151 
152 #define APPL_CFG_MISC				0x110
153 #define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
154 #define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
155 #define APPL_CFG_MISC_ARCACHE_SHIFT		10
156 #define APPL_CFG_MISC_ARCACHE_VAL		3
157 
158 #define APPL_CFG_SLCG_OVERRIDE			0x114
159 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)
160 
161 #define APPL_CAR_RESET_OVRD				0x12C
162 #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)
163 
164 #define IO_BASE_IO_DECODE				BIT(0)
165 #define IO_BASE_IO_DECODE_BIT8				BIT(8)
166 
167 #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
168 #define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)
169 
170 #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
171 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)
172 
173 #define N_FTS_VAL					52
174 #define FTS_VAL						52
175 
176 #define GEN3_EQ_CONTROL_OFF			0x8a8
177 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
178 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
179 #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)
180 
181 #define GEN3_RELATED_OFF			0x890
182 #define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
183 #define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
184 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
185 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)
186 
187 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
188 #define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
189 #define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
190 #define AMBA_ERROR_RESPONSE_CRS_OKAY		0
191 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
192 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2
193 
194 #define MSIX_ADDR_MATCH_LOW_OFF			0x940
195 #define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
196 #define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)
197 
198 #define MSIX_ADDR_MATCH_HIGH_OFF		0x944
199 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)
200 
201 #define PORT_LOGIC_MSIX_DOORBELL			0x948
202 
203 #define CAP_SPCIE_CAP_OFF			0x154
204 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
205 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
206 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8
207 
208 #define PME_ACK_TIMEOUT 10000
209 
210 #define LTSSM_TIMEOUT 50000	/* 50ms */
211 
212 #define GEN3_GEN4_EQ_PRESET_INIT	5
213 
214 #define GEN1_CORE_CLK_FREQ	62500000
215 #define GEN2_CORE_CLK_FREQ	125000000
216 #define GEN3_CORE_CLK_FREQ	250000000
217 #define GEN4_CORE_CLK_FREQ	500000000
218 
219 #define LTR_MSG_TIMEOUT		(100 * 1000)
220 
221 #define PERST_DEBOUNCE_TIME	(5 * 1000)
222 
223 #define EP_STATE_DISABLED	0
224 #define EP_STATE_ENABLED	1
225 
/*
 * Core clock rate per link speed. Indexed by (Current Link Speed - 1),
 * i.e. entry 0 is Gen-1 (2.5 GT/s), entry 3 is Gen-4 (16 GT/s).
 */
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};
232 
/*
 * struct tegra_pcie_dw - per-controller state for the Tegra194 DWC PCIe IP.
 *
 * One instance serves either Root Port or Endpoint operation; @mode selects
 * which. The embedded @pci member lets the DesignWare core code recover this
 * wrapper via container_of() (see to_tegra_pcie()).
 */
struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;	/* "APPL" (application logic) MMIO region */
	struct resource *dbi_res;	/* DBI (config shadow) MMIO region */
	struct resource *atu_dma_res;	/* iATU + DMA MMIO region */
	void __iomem *appl_base;	/* mapped APPL registers (appl_readl/writel) */
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;		/* embedded DWC core; do not reorder lightly */
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;	/* DW_PCIE_RC_TYPE or DW_PCIE_EP_TYPE */

	bool supports_clkreq;		/* CLKREQ# routed; gates L1SS advertisement */
	bool enable_cdm_check;		/* enable CDM register-check interrupts */
	bool link_state;
	bool update_fc_fixup;		/* apply ACK/NAK flow-control timer fixup */
	u8 init_link_width;		/* negotiated width at link-up, for bad-link WAR */
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;			/* controller ID, used with BPMP */
	u32 cfg_link_cap_l1sub;		/* DBI offset of L1SS capability register */
	u32 ras_des_cap;		/* DBI offset of RAS DES vendor capability */
	u32 pcie_cap_base;		/* DBI offset of the PCIe capability */
	u32 aspm_cmrt;			/* common-mode restore time (from DT) */
	u32 aspm_pwr_on_t;		/* T_POWER_ON (from DT) */
	u32 aspm_l0s_enter_lat;		/* L0s entrance latency (from DT) */

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;			/* EP_STATE_DISABLED or EP_STATE_ENABLED */
};
277 
/* Per-compatible OF match data: selects Root Port vs Endpoint operation */
struct tegra_pcie_dw_of_data {
	enum dw_pcie_device_mode mode;
};
281 
/* Recover the Tegra wrapper from the embedded DesignWare core handle */
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}
286 
/* Write @value to APPL register @reg (relaxed: no ordering vs DMA implied) */
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}
292 
/* Read APPL register @reg (relaxed: no ordering vs DMA implied) */
static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}
297 
/*
 * NOTE(review): this appears to duplicate struct tegra_pcie_dw_of_data and
 * is not referenced in this chunk — candidate for removal; verify no users
 * elsewhere in the file before deleting.
 */
struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};
301 
/*
 * Workaround for marginal links: if the Link Bandwidth Management Status
 * bit is set and the negotiated width has dropped below what was seen at
 * initial link-up, force the target link speed down to 2.5 GT/s (Gen-1)
 * and request link retraining, trading speed for stability.
 */
static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE:- Since this scenario is uncommon and link as such is not
	 * stable anyway, not waiting to confirm if link is really
	 * transitioning to Gen-2 speed
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			/* Cap target speed at 2.5 GT/s in Link Control 2 */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			/* Kick off retraining at the reduced target speed */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}
335 
/*
 * Root Port mode interrupt handler.
 *
 * Demultiplexes the L0 interrupt status and services:
 *  - link state events: toggles the CAR core-reset override as a workaround
 *    after Secondary Bus Reset / surprise link down, then re-requests a
 *    speed change;
 *  - bandwidth events: applies the bad-link width workaround on autonomous
 *    bandwidth changes and logs the new speed on bandwidth-management
 *    events;
 *  - CDM register-check completion/errors (when the check is enabled).
 */
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		/* Ack the latched L1 status by writing it back */
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			/* Re-request a link speed change after the reset pulse */
			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			/* Set LBMS in the link status shadow before acking */
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}
412 
413 static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
414 {
415 	u32 val;
416 
417 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
418 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
419 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
420 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
421 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
422 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
423 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
424 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
425 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
426 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
427 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
428 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
429 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
430 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
431 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
432 	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
433 
434 	val = appl_readl(pcie, APPL_CTRL);
435 	val |= APPL_CTRL_LTSSM_EN;
436 	appl_writel(pcie, val, APPL_CTRL);
437 }
438 
439 static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
440 {
441 	struct tegra_pcie_dw *pcie = arg;
442 	struct dw_pcie *pci = &pcie->pci;
443 	u32 val, speed;
444 
445 	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
446 		PCI_EXP_LNKSTA_CLS;
447 	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
448 
449 	/* If EP doesn't advertise L1SS, just return */
450 	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
451 	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
452 		return IRQ_HANDLED;
453 
454 	/* Check if BME is set to '1' */
455 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
456 	if (val & PCI_COMMAND_MASTER) {
457 		ktime_t timeout;
458 
459 		/* 110us for both snoop and no-snoop */
460 		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
461 		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
462 		appl_writel(pcie, val, APPL_LTR_MSG_1);
463 
464 		/* Send LTR upstream */
465 		val = appl_readl(pcie, APPL_LTR_MSG_2);
466 		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
467 		appl_writel(pcie, val, APPL_LTR_MSG_2);
468 
469 		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
470 		for (;;) {
471 			val = appl_readl(pcie, APPL_LTR_MSG_2);
472 			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
473 				break;
474 			if (ktime_after(ktime_get(), timeout))
475 				break;
476 			usleep_range(1000, 1100);
477 		}
478 		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
479 			dev_err(pcie->dev, "Failed to send LTR message\n");
480 	}
481 
482 	return IRQ_HANDLED;
483 }
484 
/*
 * Endpoint mode hard (top-half) IRQ handler.
 *
 * Handles link-state events inline (hot reset completion, RDLH link-up
 * notification to the DWC EP core) and defers Bus-Master-Enable changes
 * to the threaded handler via IRQ_WAKE_THREAD. Unrecognized interrupts
 * are logged and their L0 status cleared.
 */
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		/* Ack the latched L1 status by writing it back */
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		/* BME toggled: let the threaded handler send the LTR message */
		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}
529 
530 static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
531 				     int size, u32 *val)
532 {
533 	/*
534 	 * This is an endpoint mode specific register happen to appear even
535 	 * when controller is operating in root port mode and system hangs
536 	 * when it is accessed with link being in ASPM-L1 state.
537 	 * So skip accessing it altogether
538 	 */
539 	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
540 		*val = 0x00000000;
541 		return PCIBIOS_SUCCESSFUL;
542 	}
543 
544 	return pci_generic_config_read(bus, devfn, where, size, val);
545 }
546 
547 static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
548 				     int size, u32 val)
549 {
550 	/*
551 	 * This is an endpoint mode specific register happen to appear even
552 	 * when controller is operating in root port mode and system hangs
553 	 * when it is accessed with link being in ASPM-L1 state.
554 	 * So skip accessing it altogether
555 	 */
556 	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
557 		return PCIBIOS_SUCCESSFUL;
558 
559 	return pci_generic_config_write(bus, devfn, where, size, val);
560 }
561 
/* Root-port own-config accessors that filter the MSI-X doorbell register */
static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};
567 
568 #if defined(CONFIG_PCIEASPM)
569 static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
570 {
571 	u32 val;
572 
573 	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
574 	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
575 	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
576 }
577 
578 static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
579 {
580 	u32 val;
581 
582 	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
583 	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
584 	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
585 }
586 
/*
 * Select RAS DES event @event (group 5) in the event counter control
 * register, keep all counters enabled, and return the current counter
 * value from the data register.
 */
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}
604 
/*
 * debugfs "aspm_state_cnt" show callback: print the L0s/L1/L1.1/L1.2
 * entry counters from the RAS DES capability, then clear and re-arm all
 * counters so each read reports counts since the previous read.
 */
static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}
639 
/*
 * Locate the L1SS and RAS DES capabilities, enable the ASPM event
 * counters, and program the DT-provided ASPM timing parameters.
 */
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/*
	 * NOTE(review): this assumes the first vendor-specific extended
	 * capability is the RAS DES one — confirm against the controller's
	 * capability list.
	 */
	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	/* Shifts 8 and 19 are the field positions of the two masks above */
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
671 
/* Expose the ASPM state counters under debugfs as "aspm_state_cnt" */
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
677 #else
/* CONFIG_PCIEASPM disabled: ASPM tuning and its debugfs node are no-ops */
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
682 #endif
683 
/*
 * Enable link-state related interrupts (and CDM check interrupts when
 * configured), record the initial negotiated link width for the bad-link
 * workaround, and enable Link Bandwidth Management notifications.
 */
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	/* Remember the width negotiated at link-up (bad-link WAR baseline) */
	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	/* Enable Link Bandwidth Management interrupt */
	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}
721 
722 static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
723 {
724 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
725 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
726 	u32 val;
727 
728 	/* Enable legacy interrupt generation */
729 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
730 	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
731 	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
732 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
733 
734 	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
735 	val |= APPL_INTR_EN_L1_8_INTX_EN;
736 	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
737 	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
738 	if (IS_ENABLED(CONFIG_PCIEAER))
739 		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
740 	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
741 }
742 
743 static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
744 {
745 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
746 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
747 	u32 val;
748 
749 	/* Enable MSI interrupt generation */
750 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
751 	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
752 	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
753 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
754 }
755 
756 static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
757 {
758 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
759 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
760 
761 	/* Clear interrupt statuses before enabling interrupts */
762 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
763 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
764 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
765 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
766 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
767 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
768 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
769 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
770 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
771 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
772 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
773 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
774 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
775 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
776 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
777 
778 	tegra_pcie_enable_system_interrupts(pp);
779 	tegra_pcie_enable_legacy_interrupts(pp);
780 	if (IS_ENABLED(CONFIG_PCI_MSI))
781 		tegra_pcie_enable_msi_interrupts(pp);
782 }
783 
784 static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
785 {
786 	struct dw_pcie *pci = &pcie->pci;
787 	u32 val, offset, i;
788 
789 	/* Program init preset */
790 	for (i = 0; i < pcie->num_lanes; i++) {
791 		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
792 		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
793 		val |= GEN3_GEN4_EQ_PRESET_INIT;
794 		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
795 		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
796 			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
797 		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
798 
799 		offset = dw_pcie_find_ext_capability(pci,
800 						     PCI_EXT_CAP_ID_PL_16GT) +
801 				PCI_PL_16GT_LE_CTRL;
802 		val = dw_pcie_readb_dbi(pci, offset + i);
803 		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
804 		val |= GEN3_GEN4_EQ_PRESET_INIT;
805 		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
806 		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
807 			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
808 		dw_pcie_writeb_dbi(pci, offset + i, val);
809 	}
810 
811 	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
812 	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
813 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
814 
815 	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
816 	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
817 	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
818 	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
819 	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
820 
821 	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
822 	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
823 	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
824 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
825 
826 	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
827 	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
828 	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
829 	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
830 	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
831 
832 	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
833 	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
834 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
835 }
836 
/*
 * DesignWare host_init callback: root port configuration.
 *
 * Installs the filtered own-config accessors, caches the PCIe capability
 * offset, then programs payload size, bridge decode windows, CRS response
 * behavior, lane width, EQ presets and ASPM parameters before raising the
 * core clock to the Gen-4 rate. Returns 0 (cannot fail).
 */
static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_16;

	pp->bridge->ops = &tegra_pci_ops;

	/* Cache the PCIe capability offset once across re-inits */
	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	/* Force Max Payload Size to 256B */
	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable as 0xFFFF0001 response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	/* Board-specific ACK/NAK flow-control timer fixup (from DT) */
	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}
903 
904 static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
905 {
906 	u32 val, offset, speed, tmp;
907 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
908 	struct pcie_port *pp = &pci->pp;
909 	bool retry = true;
910 
911 	if (pcie->mode == DW_PCIE_EP_TYPE) {
912 		enable_irq(pcie->pex_rst_irq);
913 		return 0;
914 	}
915 
916 retry_link:
917 	/* Assert RST */
918 	val = appl_readl(pcie, APPL_PINMUX);
919 	val &= ~APPL_PINMUX_PEX_RST;
920 	appl_writel(pcie, val, APPL_PINMUX);
921 
922 	usleep_range(100, 200);
923 
924 	/* Enable LTSSM */
925 	val = appl_readl(pcie, APPL_CTRL);
926 	val |= APPL_CTRL_LTSSM_EN;
927 	appl_writel(pcie, val, APPL_CTRL);
928 
929 	/* De-assert RST */
930 	val = appl_readl(pcie, APPL_PINMUX);
931 	val |= APPL_PINMUX_PEX_RST;
932 	appl_writel(pcie, val, APPL_PINMUX);
933 
934 	msleep(100);
935 
936 	if (dw_pcie_wait_for_link(pci)) {
937 		if (!retry)
938 			return 0;
939 		/*
940 		 * There are some endpoints which can't get the link up if
941 		 * root port has Data Link Feature (DLF) enabled.
942 		 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
943 		 * on Scaled Flow Control and DLF.
944 		 * So, need to confirm that is indeed the case here and attempt
945 		 * link up once again with DLF disabled.
946 		 */
947 		val = appl_readl(pcie, APPL_DEBUG);
948 		val &= APPL_DEBUG_LTSSM_STATE_MASK;
949 		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
950 		tmp = appl_readl(pcie, APPL_LINK_STATUS);
951 		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
952 		if (!(val == 0x11 && !tmp)) {
953 			/* Link is down for all good reasons */
954 			return 0;
955 		}
956 
957 		dev_info(pci->dev, "Link is down in DLL");
958 		dev_info(pci->dev, "Trying again with DLFE disabled\n");
959 		/* Disable LTSSM */
960 		val = appl_readl(pcie, APPL_CTRL);
961 		val &= ~APPL_CTRL_LTSSM_EN;
962 		appl_writel(pcie, val, APPL_CTRL);
963 
964 		reset_control_assert(pcie->core_rst);
965 		reset_control_deassert(pcie->core_rst);
966 
967 		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
968 		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
969 		val &= ~PCI_DLF_EXCHANGE_ENABLE;
970 		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
971 
972 		tegra_pcie_dw_host_init(pp);
973 		dw_pcie_setup_rc(pp);
974 
975 		retry = false;
976 		goto retry_link;
977 	}
978 
979 	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
980 		PCI_EXP_LNKSTA_CLS;
981 	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
982 
983 	tegra_pcie_enable_interrupts(pp);
984 
985 	return 0;
986 }
987 
988 static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
989 {
990 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
991 	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
992 
993 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
994 }
995 
/*
 * Stop reacting to link events by masking the PERST# IRQ that drives
 * endpoint bring-up/teardown (the IRQ enabled in start_link for EP mode).
 */
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}
1002 
/* Core DesignWare callbacks implemented by this driver */
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};
1008 
/* Root Port (host) callbacks for the DesignWare core */
static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.host_init = tegra_pcie_dw_host_init,
};
1012 
1013 static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
1014 {
1015 	unsigned int phy_count = pcie->phy_count;
1016 
1017 	while (phy_count--) {
1018 		phy_power_off(pcie->phys[phy_count]);
1019 		phy_exit(pcie->phys[phy_count]);
1020 	}
1021 }
1022 
1023 static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
1024 {
1025 	unsigned int i;
1026 	int ret;
1027 
1028 	for (i = 0; i < pcie->phy_count; i++) {
1029 		ret = phy_init(pcie->phys[i]);
1030 		if (ret < 0)
1031 			goto phy_power_off;
1032 
1033 		ret = phy_power_on(pcie->phys[i]);
1034 		if (ret < 0)
1035 			goto phy_exit;
1036 	}
1037 
1038 	return 0;
1039 
1040 phy_power_off:
1041 	while (i--) {
1042 		phy_power_off(pcie->phys[i]);
1043 phy_exit:
1044 		phy_exit(pcie->phys[i]);
1045 	}
1046 
1047 	return ret;
1048 }
1049 
1050 static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
1051 {
1052 	struct platform_device *pdev = to_platform_device(pcie->dev);
1053 	struct device_node *np = pcie->dev->of_node;
1054 	int ret;
1055 
1056 	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1057 	if (!pcie->dbi_res) {
1058 		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
1059 		return -ENODEV;
1060 	}
1061 
1062 	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
1063 	if (ret < 0) {
1064 		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
1065 		return ret;
1066 	}
1067 
1068 	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
1069 				   &pcie->aspm_pwr_on_t);
1070 	if (ret < 0)
1071 		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
1072 			 ret);
1073 
1074 	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
1075 				   &pcie->aspm_l0s_enter_lat);
1076 	if (ret < 0)
1077 		dev_info(pcie->dev,
1078 			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
1079 
1080 	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
1081 	if (ret < 0) {
1082 		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
1083 		return ret;
1084 	}
1085 
1086 	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
1087 	if (ret) {
1088 		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
1089 		return ret;
1090 	}
1091 
1092 	ret = of_property_count_strings(np, "phy-names");
1093 	if (ret < 0) {
1094 		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
1095 			ret);
1096 		return ret;
1097 	}
1098 	pcie->phy_count = ret;
1099 
1100 	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
1101 		pcie->update_fc_fixup = true;
1102 
1103 	pcie->supports_clkreq =
1104 		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
1105 
1106 	pcie->enable_cdm_check =
1107 		of_property_read_bool(np, "snps,enable-cdm-check");
1108 
1109 	if (pcie->mode == DW_PCIE_RC_TYPE)
1110 		return 0;
1111 
1112 	/* Endpoint mode specific DT entries */
1113 	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
1114 	if (IS_ERR(pcie->pex_rst_gpiod)) {
1115 		int err = PTR_ERR(pcie->pex_rst_gpiod);
1116 		const char *level = KERN_ERR;
1117 
1118 		if (err == -EPROBE_DEFER)
1119 			level = KERN_DEBUG;
1120 
1121 		dev_printk(level, pcie->dev,
1122 			   dev_fmt("Failed to get PERST GPIO: %d\n"),
1123 			   err);
1124 		return err;
1125 	}
1126 
1127 	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
1128 						    "nvidia,refclk-select",
1129 						    GPIOD_OUT_HIGH);
1130 	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
1131 		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
1132 		const char *level = KERN_ERR;
1133 
1134 		if (err == -EPROBE_DEFER)
1135 			level = KERN_DEBUG;
1136 
1137 		dev_printk(level, pcie->dev,
1138 			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
1139 			   err);
1140 		pcie->pex_refclk_sel_gpiod = NULL;
1141 	}
1142 
1143 	return 0;
1144 }
1145 
1146 static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
1147 					  bool enable)
1148 {
1149 	struct mrq_uphy_response resp;
1150 	struct tegra_bpmp_message msg;
1151 	struct mrq_uphy_request req;
1152 
1153 	/* Controller-5 doesn't need to have its state set by BPMP-FW */
1154 	if (pcie->cid == 5)
1155 		return 0;
1156 
1157 	memset(&req, 0, sizeof(req));
1158 	memset(&resp, 0, sizeof(resp));
1159 
1160 	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
1161 	req.controller_state.pcie_controller = pcie->cid;
1162 	req.controller_state.enable = enable;
1163 
1164 	memset(&msg, 0, sizeof(msg));
1165 	msg.mrq = MRQ_UPHY;
1166 	msg.tx.data = &req;
1167 	msg.tx.size = sizeof(req);
1168 	msg.rx.data = &resp;
1169 	msg.rx.size = sizeof(resp);
1170 
1171 	return tegra_bpmp_transfer(pcie->bpmp, &msg);
1172 }
1173 
1174 static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
1175 					 bool enable)
1176 {
1177 	struct mrq_uphy_response resp;
1178 	struct tegra_bpmp_message msg;
1179 	struct mrq_uphy_request req;
1180 
1181 	memset(&req, 0, sizeof(req));
1182 	memset(&resp, 0, sizeof(resp));
1183 
1184 	if (enable) {
1185 		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
1186 		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
1187 	} else {
1188 		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
1189 		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
1190 	}
1191 
1192 	memset(&msg, 0, sizeof(msg));
1193 	msg.mrq = MRQ_UPHY;
1194 	msg.tx.data = &req;
1195 	msg.tx.size = sizeof(req);
1196 	msg.rx.data = &resp;
1197 	msg.rx.size = sizeof(resp);
1198 
1199 	return tegra_bpmp_transfer(pcie->bpmp, &msg);
1200 }
1201 
1202 static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
1203 {
1204 	struct pcie_port *pp = &pcie->pci.pp;
1205 	struct pci_bus *child, *root_bus = NULL;
1206 	struct pci_dev *pdev;
1207 
1208 	/*
1209 	 * link doesn't go into L2 state with some of the endpoints with Tegra
1210 	 * if they are not in D0 state. So, need to make sure that immediate
1211 	 * downstream devices are in D0 state before sending PME_TurnOff to put
1212 	 * link into L2 state.
1213 	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
1214 	 * 5.2 Link State Power Management (Page #428).
1215 	 */
1216 
1217 	list_for_each_entry(child, &pp->bridge->bus->children, node) {
1218 		/* Bring downstream devices to D0 if they are not already in */
1219 		if (child->parent == pp->bridge->bus) {
1220 			root_bus = child;
1221 			break;
1222 		}
1223 	}
1224 
1225 	if (!root_bus) {
1226 		dev_err(pcie->dev, "Failed to find downstream devices\n");
1227 		return;
1228 	}
1229 
1230 	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
1231 		if (PCI_SLOT(pdev->devfn) == 0) {
1232 			if (pci_set_power_state(pdev, PCI_D0))
1233 				dev_err(pcie->dev,
1234 					"Failed to transition %s to D0 state\n",
1235 					dev_name(&pdev->dev));
1236 		}
1237 	}
1238 }
1239 
1240 static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
1241 {
1242 	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
1243 	if (IS_ERR(pcie->slot_ctl_3v3)) {
1244 		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
1245 			return PTR_ERR(pcie->slot_ctl_3v3);
1246 
1247 		pcie->slot_ctl_3v3 = NULL;
1248 	}
1249 
1250 	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
1251 	if (IS_ERR(pcie->slot_ctl_12v)) {
1252 		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
1253 			return PTR_ERR(pcie->slot_ctl_12v);
1254 
1255 		pcie->slot_ctl_12v = NULL;
1256 	}
1257 
1258 	return 0;
1259 }
1260 
1261 static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
1262 {
1263 	int ret;
1264 
1265 	if (pcie->slot_ctl_3v3) {
1266 		ret = regulator_enable(pcie->slot_ctl_3v3);
1267 		if (ret < 0) {
1268 			dev_err(pcie->dev,
1269 				"Failed to enable 3.3V slot supply: %d\n", ret);
1270 			return ret;
1271 		}
1272 	}
1273 
1274 	if (pcie->slot_ctl_12v) {
1275 		ret = regulator_enable(pcie->slot_ctl_12v);
1276 		if (ret < 0) {
1277 			dev_err(pcie->dev,
1278 				"Failed to enable 12V slot supply: %d\n", ret);
1279 			goto fail_12v_enable;
1280 		}
1281 	}
1282 
1283 	/*
1284 	 * According to PCI Express Card Electromechanical Specification
1285 	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
1286 	 * should be a minimum of 100ms.
1287 	 */
1288 	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
1289 		msleep(100);
1290 
1291 	return 0;
1292 
1293 fail_12v_enable:
1294 	if (pcie->slot_ctl_3v3)
1295 		regulator_disable(pcie->slot_ctl_3v3);
1296 	return ret;
1297 }
1298 
/* Disable the slot supplies in reverse order of enabling (12V, then 3.3V) */
static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}
1306 
/*
 * Power-up and configuration sequence for Root Port mode: BPMP enable,
 * slot and control supplies, core clock, APB reset release, PHYs, then
 * APPL register setup and finally core reset release. The order of
 * these steps is significant; errors unwind in reverse via gotos.
 */
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	/* Without CLKREQ routing, force the CLKREQ override instead */
	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	/* ret is 0 here (all earlier failures jump to the labels below) */
	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}
1403 
1404 static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
1405 {
1406 	int ret;
1407 
1408 	ret = reset_control_assert(pcie->core_rst);
1409 	if (ret)
1410 		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
1411 
1412 	tegra_pcie_disable_phy(pcie);
1413 
1414 	ret = reset_control_assert(pcie->core_apb_rst);
1415 	if (ret)
1416 		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
1417 
1418 	clk_disable_unprepare(pcie->core_clk);
1419 
1420 	ret = regulator_disable(pcie->pex_ctl_supply);
1421 	if (ret)
1422 		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
1423 
1424 	tegra_pcie_disable_slot_regulators(pcie);
1425 
1426 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1427 	if (ret)
1428 		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
1429 			pcie->cid, ret);
1430 }
1431 
1432 static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
1433 {
1434 	struct dw_pcie *pci = &pcie->pci;
1435 	struct pcie_port *pp = &pci->pp;
1436 	int ret;
1437 
1438 	ret = tegra_pcie_config_controller(pcie, false);
1439 	if (ret < 0)
1440 		return ret;
1441 
1442 	pp->ops = &tegra_pcie_dw_host_ops;
1443 
1444 	ret = dw_pcie_host_init(pp);
1445 	if (ret < 0) {
1446 		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
1447 		goto fail_host_init;
1448 	}
1449 
1450 	return 0;
1451 
1452 fail_host_init:
1453 	tegra_pcie_unconfig_controller(pcie);
1454 	return ret;
1455 }
1456 
/*
 * Request PME_Turn_Off and poll for the link to settle in L2.
 * Returns 0 when the link is already down or reaches L2, otherwise the
 * poll helper's timeout error.
 */
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	/* Trigger the PME_Turn_Off broadcast */
	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
				 1, PME_ACK_TIMEOUT);
}
1472 
1473 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
1474 {
1475 	u32 data;
1476 	int err;
1477 
1478 	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
1479 		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
1480 		return;
1481 	}
1482 
1483 	/*
1484 	 * PCIe controller exits from L2 only if reset is applied, so
1485 	 * controller doesn't handle interrupts. But in cases where
1486 	 * L2 entry fails, PERST# is asserted which can trigger surprise
1487 	 * link down AER. However this function call happens in
1488 	 * suspend_noirq(), so AER interrupt will not be processed.
1489 	 * Disable all interrupts to avoid such a scenario.
1490 	 */
1491 	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
1492 
1493 	if (tegra_pcie_try_link_l2(pcie)) {
1494 		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1495 		/*
1496 		 * TX lane clock freq will reset to Gen1 only if link is in L2
1497 		 * or detect state.
1498 		 * So apply pex_rst to end point to force RP to go into detect
1499 		 * state
1500 		 */
1501 		data = appl_readl(pcie, APPL_PINMUX);
1502 		data &= ~APPL_PINMUX_PEX_RST;
1503 		appl_writel(pcie, data, APPL_PINMUX);
1504 
1505 		/*
1506 		 * Some cards do not go to detect state even after de-asserting
1507 		 * PERST#. So, de-assert LTSSM to bring link to detect state.
1508 		 */
1509 		data = readl(pcie->appl_base + APPL_CTRL);
1510 		data &= ~APPL_CTRL_LTSSM_EN;
1511 		writel(data, pcie->appl_base + APPL_CTRL);
1512 
1513 		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1514 						data,
1515 						((data &
1516 						APPL_DEBUG_LTSSM_STATE_MASK) >>
1517 						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1518 						LTSSM_STATE_PRE_DETECT,
1519 						1, LTSSM_TIMEOUT);
1520 		if (err)
1521 			dev_info(pcie->dev, "Link didn't go to detect state\n");
1522 	}
1523 	/*
1524 	 * DBI registers may not be accessible after this as PLL-E would be
1525 	 * down depending on how CLKREQ is pulled by end point
1526 	 */
1527 	data = appl_readl(pcie, APPL_PINMUX);
1528 	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1529 	/* Cut REFCLK to slot */
1530 	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1531 	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1532 	appl_writel(pcie, data, APPL_PINMUX);
1533 }
1534 
/*
 * Full host teardown. Order matters: downstream devices must be in D0
 * before PME_Turn_Off can put the link into L2 (see
 * tegra_pcie_downstream_dev_to_D0()), and the host bridge is removed
 * before the controller hardware is unconfigured.
 */
static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}
1542 
/*
 * Root Port mode bring-up: runtime PM, sideband pinmux, controller
 * init, then debugfs. A controller whose link never comes up is torn
 * down again and reported as -ENOMEDIUM.
 */
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	/*
	 * pm_runtime_get_sync() bumps the usage count even on failure;
	 * the fail_pm_get_sync label drops it again with put_sync().
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize controller: %d\n", ret);
		goto fail_pm_get_sync;
	}

	/* No device on the slot is treated as an error for this port */
	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	/* Name the debugfs directory after the DT node's full path */
	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
1594 
/*
 * PERST# assert handler (endpoint mode): bring the LTSSM to the detect
 * state, then power the controller down in reverse order of
 * pex_ep_event_pex_rst_deassert(). Idempotent via ep_state.
 */
static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* Wait for the link to settle in the pre-detect state */
	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				 LTSSM_STATE_PRE_DETECT,
				 1, LTSSM_TIMEOUT);
	if (ret)
		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);

	reset_control_assert(pcie->core_rst);

	tegra_pcie_disable_phy(pcie);

	reset_control_assert(pcie->core_apb_rst);

	clk_disable_unprepare(pcie->core_clk);

	pm_runtime_put_sync(pcie->dev);

	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);

	pcie->ep_state = EP_STATE_DISABLED;
	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
1633 
/*
 * PERST# de-assert handler (endpoint mode): power the controller up,
 * program it for EP operation, complete the DWC endpoint init and
 * finally enable the LTSSM to allow link training. The register-write
 * order mirrors the RP-mode tegra_pcie_config_controller() sequence.
 * Idempotent via ep_state; errors unwind in reverse via gotos.
 */
static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;
	u16 val_16;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
			pcie->cid, ret);
		goto fail_set_ctrl_state;
	}

	ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
	if (ret) {
		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
		goto fail_pll_init;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	/* Update CFG and iATU_DMA base addresses */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	/* Enable the interrupt lines the EP event handlers rely on */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	/* Optional board-specific ACK/NAK flow-control timer tweak */
	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Program a Max Payload Size of 256 bytes */
	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	/*
	 * Point MSI-X address matching at the MSI memory page so that
	 * writes to it (see tegra_pcie_ep_raise_msix_irq()) trigger MSI-X.
	 */
	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
fail_set_ctrl_state:
	pm_runtime_put_sync(dev);
}
1812 
1813 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
1814 {
1815 	struct tegra_pcie_dw *pcie = arg;
1816 
1817 	if (gpiod_get_value(pcie->pex_rst_gpiod))
1818 		pex_ep_event_pex_rst_assert(pcie);
1819 	else
1820 		pex_ep_event_pex_rst_deassert(pcie);
1821 
1822 	return IRQ_HANDLED;
1823 }
1824 
/*
 * Raise a legacy INTx interrupt by pulsing the APPL_LEGACY_INTX
 * register for 1-2 ms.
 */
static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}
1836 
/*
 * Raise an MSI by setting the vector's bit in APPL_MSI_CTRL_1.
 * Vector numbers above 31 cannot be expressed as a single bit in that
 * 32-bit register, so reject them.
 */
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}
1846 
/*
 * Raise an MSI-X interrupt by writing the vector number to the MSI
 * memory page; MSI-X address matching on that page was set up in
 * pex_ep_event_pex_rst_deassert().
 */
static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}
1855 
1856 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1857 				   enum pci_epc_irq_type type,
1858 				   u16 interrupt_num)
1859 {
1860 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1861 	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1862 
1863 	switch (type) {
1864 	case PCI_EPC_IRQ_LEGACY:
1865 		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
1866 
1867 	case PCI_EPC_IRQ_MSI:
1868 		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
1869 
1870 	case PCI_EPC_IRQ_MSIX:
1871 		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
1872 
1873 	default:
1874 		dev_err(pci->dev, "Unknown IRQ type\n");
1875 		return -EPERM;
1876 	}
1877 
1878 	return 0;
1879 }
1880 
/* Endpoint capabilities: only BAR 0 (fixed 1M, 64-bit) is usable */
static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};
1890 
/* Return the fixed endpoint capability description for this controller */
static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}
1896 
/* Endpoint-mode callbacks for the DesignWare core */
static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};
1901 
/*
 * Endpoint mode setup: configure the DWC EP layer, turn the PERST#
 * GPIO into a debounced, initially-disabled threaded IRQ (it is
 * enabled later from tegra_pcie_dw_start_link()), then register with
 * the DWC endpoint subsystem.
 */
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	/* Keep the IRQ masked until start_link() explicitly enables it */
	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	/* Both PERST# edges matter: rising asserts, falling de-asserts */
	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}
1963 
1964 static int tegra_pcie_dw_probe(struct platform_device *pdev)
1965 {
1966 	const struct tegra_pcie_dw_of_data *data;
1967 	struct device *dev = &pdev->dev;
1968 	struct resource *atu_dma_res;
1969 	struct tegra_pcie_dw *pcie;
1970 	struct pcie_port *pp;
1971 	struct dw_pcie *pci;
1972 	struct phy **phys;
1973 	char *name;
1974 	int ret;
1975 	u32 i;
1976 
1977 	data = of_device_get_match_data(dev);
1978 
1979 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1980 	if (!pcie)
1981 		return -ENOMEM;
1982 
1983 	pci = &pcie->pci;
1984 	pci->dev = &pdev->dev;
1985 	pci->ops = &tegra_dw_pcie_ops;
1986 	pci->n_fts[0] = N_FTS_VAL;
1987 	pci->n_fts[1] = FTS_VAL;
1988 	pci->version = 0x490A;
1989 
1990 	pp = &pci->pp;
1991 	pp->num_vectors = MAX_MSI_IRQS;
1992 	pcie->dev = &pdev->dev;
1993 	pcie->mode = (enum dw_pcie_device_mode)data->mode;
1994 
1995 	ret = tegra_pcie_dw_parse_dt(pcie);
1996 	if (ret < 0) {
1997 		const char *level = KERN_ERR;
1998 
1999 		if (ret == -EPROBE_DEFER)
2000 			level = KERN_DEBUG;
2001 
2002 		dev_printk(level, dev,
2003 			   dev_fmt("Failed to parse device tree: %d\n"),
2004 			   ret);
2005 		return ret;
2006 	}
2007 
2008 	ret = tegra_pcie_get_slot_regulators(pcie);
2009 	if (ret < 0) {
2010 		const char *level = KERN_ERR;
2011 
2012 		if (ret == -EPROBE_DEFER)
2013 			level = KERN_DEBUG;
2014 
2015 		dev_printk(level, dev,
2016 			   dev_fmt("Failed to get slot regulators: %d\n"),
2017 			   ret);
2018 		return ret;
2019 	}
2020 
2021 	if (pcie->pex_refclk_sel_gpiod)
2022 		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
2023 
2024 	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
2025 	if (IS_ERR(pcie->pex_ctl_supply)) {
2026 		ret = PTR_ERR(pcie->pex_ctl_supply);
2027 		if (ret != -EPROBE_DEFER)
2028 			dev_err(dev, "Failed to get regulator: %ld\n",
2029 				PTR_ERR(pcie->pex_ctl_supply));
2030 		return ret;
2031 	}
2032 
2033 	pcie->core_clk = devm_clk_get(dev, "core");
2034 	if (IS_ERR(pcie->core_clk)) {
2035 		dev_err(dev, "Failed to get core clock: %ld\n",
2036 			PTR_ERR(pcie->core_clk));
2037 		return PTR_ERR(pcie->core_clk);
2038 	}
2039 
2040 	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2041 						      "appl");
2042 	if (!pcie->appl_res) {
2043 		dev_err(dev, "Failed to find \"appl\" region\n");
2044 		return -ENODEV;
2045 	}
2046 
2047 	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
2048 	if (IS_ERR(pcie->appl_base))
2049 		return PTR_ERR(pcie->appl_base);
2050 
2051 	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
2052 	if (IS_ERR(pcie->core_apb_rst)) {
2053 		dev_err(dev, "Failed to get APB reset: %ld\n",
2054 			PTR_ERR(pcie->core_apb_rst));
2055 		return PTR_ERR(pcie->core_apb_rst);
2056 	}
2057 
2058 	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
2059 	if (!phys)
2060 		return -ENOMEM;
2061 
2062 	for (i = 0; i < pcie->phy_count; i++) {
2063 		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
2064 		if (!name) {
2065 			dev_err(dev, "Failed to create P2U string\n");
2066 			return -ENOMEM;
2067 		}
2068 		phys[i] = devm_phy_get(dev, name);
2069 		kfree(name);
2070 		if (IS_ERR(phys[i])) {
2071 			ret = PTR_ERR(phys[i]);
2072 			if (ret != -EPROBE_DEFER)
2073 				dev_err(dev, "Failed to get PHY: %d\n", ret);
2074 			return ret;
2075 		}
2076 	}
2077 
2078 	pcie->phys = phys;
2079 
2080 	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2081 						   "atu_dma");
2082 	if (!atu_dma_res) {
2083 		dev_err(dev, "Failed to find \"atu_dma\" region\n");
2084 		return -ENODEV;
2085 	}
2086 	pcie->atu_dma_res = atu_dma_res;
2087 
2088 	pci->atu_size = resource_size(atu_dma_res);
2089 	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
2090 	if (IS_ERR(pci->atu_base))
2091 		return PTR_ERR(pci->atu_base);
2092 
2093 	pcie->core_rst = devm_reset_control_get(dev, "core");
2094 	if (IS_ERR(pcie->core_rst)) {
2095 		dev_err(dev, "Failed to get core reset: %ld\n",
2096 			PTR_ERR(pcie->core_rst));
2097 		return PTR_ERR(pcie->core_rst);
2098 	}
2099 
2100 	pp->irq = platform_get_irq_byname(pdev, "intr");
2101 	if (pp->irq < 0)
2102 		return pp->irq;
2103 
2104 	pcie->bpmp = tegra_bpmp_get(dev);
2105 	if (IS_ERR(pcie->bpmp))
2106 		return PTR_ERR(pcie->bpmp);
2107 
2108 	platform_set_drvdata(pdev, pcie);
2109 
2110 	switch (pcie->mode) {
2111 	case DW_PCIE_RC_TYPE:
2112 		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
2113 				       IRQF_SHARED, "tegra-pcie-intr", pcie);
2114 		if (ret) {
2115 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2116 				ret);
2117 			goto fail;
2118 		}
2119 
2120 		ret = tegra_pcie_config_rp(pcie);
2121 		if (ret && ret != -ENOMEDIUM)
2122 			goto fail;
2123 		else
2124 			return 0;
2125 		break;
2126 
2127 	case DW_PCIE_EP_TYPE:
2128 		ret = devm_request_threaded_irq(dev, pp->irq,
2129 						tegra_pcie_ep_hard_irq,
2130 						tegra_pcie_ep_irq_thread,
2131 						IRQF_SHARED | IRQF_ONESHOT,
2132 						"tegra-pcie-ep-intr", pcie);
2133 		if (ret) {
2134 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2135 				ret);
2136 			goto fail;
2137 		}
2138 
2139 		ret = tegra_pcie_config_ep(pcie, pdev);
2140 		if (ret < 0)
2141 			goto fail;
2142 		break;
2143 
2144 	default:
2145 		dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
2146 	}
2147 
2148 fail:
2149 	tegra_bpmp_put(pcie->bpmp);
2150 	return ret;
2151 }
2152 
/*
 * Tear down a controller instance: undo Root Complex or Endpoint setup,
 * release the BPMP handle and de-select the reference clock GPIO.
 */
static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->mode == DW_PCIE_RC_TYPE) {
		/*
		 * NOTE(review): when the link never came up this returns
		 * without the pm_runtime_disable()/tegra_bpmp_put() calls
		 * below -- presumably the probe path already released those
		 * in that case; confirm against tegra_pcie_config_rp().
		 */
		if (!pcie->link_state)
			return 0;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_deinit_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		/* Endpoint mode: stop reacting to PERST# before teardown. */
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}

	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	/* Release the REFCLK select GPIO asserted during probe. */
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);

	return 0;
}
2176 
2177 static int tegra_pcie_dw_suspend_late(struct device *dev)
2178 {
2179 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2180 	u32 val;
2181 
2182 	if (pcie->mode == DW_PCIE_EP_TYPE) {
2183 		dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
2184 		return -EPERM;
2185 	}
2186 
2187 	if (!pcie->link_state)
2188 		return 0;
2189 
2190 	/* Enable HW_HOT_RST mode */
2191 	val = appl_readl(pcie, APPL_CTRL);
2192 	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2193 		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2194 	val |= APPL_CTRL_HW_HOT_RST_EN;
2195 	appl_writel(pcie, val, APPL_CTRL);
2196 
2197 	return 0;
2198 }
2199 
/*
 * Noirq-suspend hook: with the link up, bring downstream devices back
 * to D0, run the PME turn-off sequence and unconfigure the controller.
 * A no-op when the link was never established.
 */
static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (!pcie->link_state)
		return 0;

	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);

	return 0;
}
2213 
/*
 * Noirq-resume hook: re-configure the controller, re-initialize the
 * host, program the Root Complex registers and restart link training.
 * A no-op when the link was never established before suspend.
 */
static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	dw_pcie_setup_rc(&pcie->pci.pp);

	ret = tegra_pcie_dw_start_link(&pcie->pci);
	if (ret < 0)
		goto fail_host_init;

	return 0;

fail_host_init:
	/* Undo tegra_pcie_config_controller() on any later failure. */
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}
2244 
2245 static int tegra_pcie_dw_resume_early(struct device *dev)
2246 {
2247 	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2248 	u32 val;
2249 
2250 	if (pcie->mode == DW_PCIE_EP_TYPE) {
2251 		dev_err(dev, "Suspend is not supported in EP mode");
2252 		return -ENOTSUPP;
2253 	}
2254 
2255 	if (!pcie->link_state)
2256 		return 0;
2257 
2258 	/* Disable HW_HOT_RST mode */
2259 	val = appl_readl(pcie, APPL_CTRL);
2260 	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2261 		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2262 	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
2263 	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
2264 	val &= ~APPL_CTRL_HW_HOT_RST_EN;
2265 	appl_writel(pcie, val, APPL_CTRL);
2266 
2267 	return 0;
2268 }
2269 
/*
 * Shutdown hook: quiesce the controller so the hardware is in a sane
 * state across kexec/reboot.  Mirrors remove() but additionally masks
 * the controller interrupts before turning the link off.
 */
static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_downstream_dev_to_D0(pcie);

		/* Keep IRQ handlers from running during teardown. */
		disable_irq(pcie->pci.pp.irq);
		if (IS_ENABLED(CONFIG_PCI_MSI))
			disable_irq(pcie->pci.pp.msi_irq);

		tegra_pcie_dw_pme_turnoff(pcie);
		tegra_pcie_unconfig_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		/* Endpoint mode: stop reacting to PERST# and assert reset. */
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}
}
2293 
/* Match data selecting Root Complex operation. */
static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

/* Match data selecting Endpoint operation. */
static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};
2301 
/* Device-tree compatibles; the data picks the controller mode. */
static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra_pcie_dw_ep_of_data,
	},
	{},
};
2313 
/*
 * System sleep callbacks.  Suspend runs late then noirq; resume runs
 * noirq then early, so the APPL_CTRL hot-reset tweaks bracket the
 * actual controller unconfig/reconfig.
 */
static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};
2320 
/* Platform driver glue; probe/remove/shutdown are defined above. */
static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name	= "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
2338