1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * PCIe host controller driver for Tegra194 SoC
4  *
5  * Copyright (C) 2019 NVIDIA Corporation.
6  *
7  * Author: Vidya Sagar <vidyas@nvidia.com>
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/debugfs.h>
12 #include <linux/delay.h>
13 #include <linux/gpio.h>
14 #include <linux/gpio/consumer.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_pci.h>
24 #include <linux/pci.h>
25 #include <linux/phy/phy.h>
26 #include <linux/pinctrl/consumer.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/random.h>
30 #include <linux/reset.h>
31 #include <linux/resource.h>
32 #include <linux/types.h>
33 #include "pcie-designware.h"
34 #include <soc/tegra/bpmp.h>
35 #include <soc/tegra/bpmp-abi.h>
36 #include "../../pci.h"
37 
38 #define APPL_PINMUX				0x0
39 #define APPL_PINMUX_PEX_RST			BIT(0)
40 #define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
41 #define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
42 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
43 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)
44 
45 #define APPL_CTRL				0x4
46 #define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
47 #define APPL_CTRL_LTSSM_EN			BIT(7)
48 #define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
49 #define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
50 #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
51 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
52 
53 #define APPL_INTR_EN_L0_0			0x8
54 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
55 #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
56 #define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
57 #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
58 #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
59 #define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
60 #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)
61 
62 #define APPL_INTR_STATUS_L0			0xC
63 #define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
64 #define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
65 #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
66 #define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
67 #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)
68 
69 #define APPL_INTR_EN_L1_0_0				0x1C
70 #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
71 #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
72 #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)
73 
74 #define APPL_INTR_STATUS_L1_0_0				0x20
75 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
76 #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
77 #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)
78 
79 #define APPL_INTR_STATUS_L1_1			0x2C
80 #define APPL_INTR_STATUS_L1_2			0x30
81 #define APPL_INTR_STATUS_L1_3			0x34
82 #define APPL_INTR_STATUS_L1_6			0x3C
83 #define APPL_INTR_STATUS_L1_7			0x40
84 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)
85 
86 #define APPL_INTR_EN_L1_8_0			0x44
87 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
88 #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
89 #define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
90 #define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)
91 
92 #define APPL_INTR_STATUS_L1_8_0			0x4C
93 #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
94 #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
95 #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)
96 
97 #define APPL_INTR_STATUS_L1_9			0x54
98 #define APPL_INTR_STATUS_L1_10			0x58
99 #define APPL_INTR_STATUS_L1_11			0x64
100 #define APPL_INTR_STATUS_L1_13			0x74
101 #define APPL_INTR_STATUS_L1_14			0x78
102 #define APPL_INTR_STATUS_L1_15			0x7C
103 #define APPL_INTR_STATUS_L1_17			0x88
104 
105 #define APPL_INTR_EN_L1_18				0x90
106 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
107 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
108 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
109 
110 #define APPL_INTR_STATUS_L1_18				0x94
111 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
112 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
113 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
114 
115 #define APPL_MSI_CTRL_1				0xAC
116 
117 #define APPL_MSI_CTRL_2				0xB0
118 
119 #define APPL_LEGACY_INTX			0xB8
120 
121 #define APPL_LTR_MSG_1				0xC4
122 #define LTR_MSG_REQ				BIT(15)
123 #define LTR_MST_NO_SNOOP_SHIFT			16
124 
125 #define APPL_LTR_MSG_2				0xC8
126 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)
127 
128 #define APPL_LINK_STATUS			0xCC
129 #define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)
130 
131 #define APPL_DEBUG				0xD0
132 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
133 #define APPL_DEBUG_PM_LINKST_IN_L0		0x11
134 #define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
135 #define APPL_DEBUG_LTSSM_STATE_SHIFT		3
136 #define LTSSM_STATE_PRE_DETECT			5
137 
138 #define APPL_RADM_STATUS			0xE4
139 #define APPL_PM_XMT_TURNOFF_STATE		BIT(0)
140 
141 #define APPL_DM_TYPE				0x100
142 #define APPL_DM_TYPE_MASK			GENMASK(3, 0)
143 #define APPL_DM_TYPE_RP				0x4
144 #define APPL_DM_TYPE_EP				0x0
145 
146 #define APPL_CFG_BASE_ADDR			0x104
147 #define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)
148 
149 #define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
150 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)
151 
152 #define APPL_CFG_MISC				0x110
153 #define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
154 #define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
155 #define APPL_CFG_MISC_ARCACHE_SHIFT		10
156 #define APPL_CFG_MISC_ARCACHE_VAL		3
157 
158 #define APPL_CFG_SLCG_OVERRIDE			0x114
159 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)
160 
161 #define APPL_CAR_RESET_OVRD				0x12C
162 #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)
163 
164 #define IO_BASE_IO_DECODE				BIT(0)
165 #define IO_BASE_IO_DECODE_BIT8				BIT(8)
166 
167 #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
168 #define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)
169 
170 #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
171 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)
172 
173 #define EVENT_COUNTER_ALL_CLEAR		0x3
174 #define EVENT_COUNTER_ENABLE_ALL	0x7
175 #define EVENT_COUNTER_ENABLE_SHIFT	2
176 #define EVENT_COUNTER_EVENT_SEL_MASK	GENMASK(7, 0)
177 #define EVENT_COUNTER_EVENT_SEL_SHIFT	16
178 #define EVENT_COUNTER_EVENT_Tx_L0S	0x2
179 #define EVENT_COUNTER_EVENT_Rx_L0S	0x3
180 #define EVENT_COUNTER_EVENT_L1		0x5
181 #define EVENT_COUNTER_EVENT_L1_1	0x7
182 #define EVENT_COUNTER_EVENT_L1_2	0x8
183 #define EVENT_COUNTER_GROUP_SEL_SHIFT	24
184 #define EVENT_COUNTER_GROUP_5		0x5
185 
186 #define N_FTS_VAL					52
187 #define FTS_VAL						52
188 
189 #define GEN3_EQ_CONTROL_OFF			0x8a8
190 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
191 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
192 #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)
193 
194 #define GEN3_RELATED_OFF			0x890
195 #define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
196 #define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
197 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
198 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)
199 
200 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
201 #define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
202 #define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
203 #define AMBA_ERROR_RESPONSE_CRS_OKAY		0
204 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
205 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2
206 
207 #define MSIX_ADDR_MATCH_LOW_OFF			0x940
208 #define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
209 #define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)
210 
211 #define MSIX_ADDR_MATCH_HIGH_OFF		0x944
212 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)
213 
214 #define PORT_LOGIC_MSIX_DOORBELL			0x948
215 
216 #define CAP_SPCIE_CAP_OFF			0x154
217 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
218 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
219 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8
220 
221 #define PME_ACK_TIMEOUT 10000
222 
223 #define LTSSM_TIMEOUT 50000	/* 50ms */
224 
225 #define GEN3_GEN4_EQ_PRESET_INIT	5
226 
227 #define GEN1_CORE_CLK_FREQ	62500000
228 #define GEN2_CORE_CLK_FREQ	125000000
229 #define GEN3_CORE_CLK_FREQ	250000000
230 #define GEN4_CORE_CLK_FREQ	500000000
231 
232 #define LTR_MSG_TIMEOUT		(100 * 1000)
233 
234 #define PERST_DEBOUNCE_TIME	(5 * 1000)
235 
236 #define EP_STATE_DISABLED	0
237 #define EP_STATE_ENABLED	1
238 
/*
 * Core clock rate for each PCIe generation, indexed by
 * (PCI_EXP_LNKSTA_CLS - 1): CLS is 1-based (1 = Gen1 ... 4 = Gen4).
 */
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};
245 
/*
 * Per-controller state for one Tegra194 PCIe instance, wrapping the
 * generic DesignWare core (struct dw_pcie) with Tegra-specific
 * application-logic (APPL) registers, clocks, resets, PHYs and BPMP
 * firmware handle.
 */
struct tegra194_pcie {
	struct device *dev;
	struct resource *appl_res;	/* APPL register space resource */
	struct resource *dbi_res;	/* DBI register space resource */
	struct resource *atu_dma_res;	/* iATU/DMA register space resource */
	void __iomem *appl_base;	/* mapped APPL registers (appl_readl/appl_writel) */
	struct clk *core_clk;		/* core clock, rate tracks link speed */
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;		/* embedded DesignWare core (see to_tegra_pcie()) */
	struct tegra_bpmp *bpmp;	/* BPMP firmware interface */

	enum dw_pcie_device_mode mode;	/* root port or endpoint */

	bool supports_clkreq;		/* CLKREQ routed; gates L1SS advertisement */
	bool enable_cdm_check;		/* enable CDM register-check interrupts */
	bool link_state;
	bool update_fc_fixup;		/* apply ACK/NAK timer fixup in host init */
	u8 init_link_width;		/* link width right after training, for bad-link WAR */
	u32 msi_ctrl_int;
	u32 num_lanes;			/* max lane count from DT */
	u32 cid;			/* controller id; indexes event_cntr_*_offset[] */
	u32 cfg_link_cap_l1sub;		/* DBI offset of L1SS capability register */
	u32 pcie_cap_base;		/* DBI offset of PCIe capability */
	u32 aspm_cmrt;			/* ASPM common-mode restore time (T_cmrt) */
	u32 aspm_pwr_on_t;		/* ASPM T_pwr_on value */
	u32 aspm_l0s_enter_lat;		/* L0s entrance latency */

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;		/* number of per-lane PHYs in phys[] */
	struct phy **phys;

	struct dentry *debugfs;		/* root of this controller's debugfs entries */

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;	/* PERST# input from host */
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;		/* IRQ raised on PERST# transitions */
	int ep_state;				/* EP_STATE_DISABLED / EP_STATE_ENABLED */
};
289 
/* Per-compatible match data: selects root port vs endpoint operation. */
struct tegra194_pcie_of_data {
	enum dw_pcie_device_mode mode;
};
293 
/* Convert the embedded DesignWare core pointer back to its Tegra wrapper. */
static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra194_pcie, pci);
}
298 
/* Write a 32-bit value to a Tegra APPL register (relaxed, no barrier). */
static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}
304 
/* Read a 32-bit value from a Tegra APPL register (relaxed, no barrier). */
static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}
309 
/*
 * SoC-level match data; NOTE(review): not referenced anywhere in this
 * portion of the file — possibly redundant with tegra194_pcie_of_data.
 */
struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};
313 
/*
 * Workaround for links that trained at a reduced width: if the Link
 * Bandwidth Management Status bit is set and the currently negotiated
 * width is narrower than the width seen right after initial training,
 * force the target link speed down to 2.5 GT/s and retrain, trading
 * speed for a stable link.
 */
static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE:- Since this scenario is uncommon and link as such is not
	 * stable anyway, not waiting to confirm if link is really
	 * transitioning to Gen-2 speed
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			/* Cap target speed at 2.5 GT/s (Gen1) */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			/* Request link retraining at the new target speed */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}
347 
348 static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
349 {
350 	struct tegra194_pcie *pcie = arg;
351 	struct dw_pcie *pci = &pcie->pci;
352 	struct pcie_port *pp = &pci->pp;
353 	u32 val, tmp;
354 	u16 val_w;
355 
356 	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
357 	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
358 		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
359 		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
360 			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
361 
362 			/* SBR & Surprise Link Down WAR */
363 			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
364 			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
365 			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
366 			udelay(1);
367 			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
368 			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
369 			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
370 
371 			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
372 			val |= PORT_LOGIC_SPEED_CHANGE;
373 			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
374 		}
375 	}
376 
377 	if (val & APPL_INTR_STATUS_L0_INT_INT) {
378 		val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
379 		if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
380 			appl_writel(pcie,
381 				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
382 				    APPL_INTR_STATUS_L1_8_0);
383 			apply_bad_link_workaround(pp);
384 		}
385 		if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
386 			appl_writel(pcie,
387 				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
388 				    APPL_INTR_STATUS_L1_8_0);
389 
390 			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
391 						  PCI_EXP_LNKSTA);
392 			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
393 				PCI_EXP_LNKSTA_CLS);
394 		}
395 	}
396 
397 	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
398 	if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
399 		val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
400 		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
401 		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
402 			dev_info(pci->dev, "CDM check complete\n");
403 			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
404 		}
405 		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
406 			dev_err(pci->dev, "CDM comparison mismatch\n");
407 			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
408 		}
409 		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
410 			dev_err(pci->dev, "CDM Logic error\n");
411 			tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
412 		}
413 		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
414 		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
415 		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
416 	}
417 
418 	return IRQ_HANDLED;
419 }
420 
421 static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
422 {
423 	u32 val;
424 
425 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
426 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
427 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
428 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
429 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
430 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
431 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
432 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
433 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
434 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
435 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
436 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
437 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
438 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
439 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
440 	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
441 
442 	val = appl_readl(pcie, APPL_CTRL);
443 	val |= APPL_CTRL_LTSSM_EN;
444 	appl_writel(pcie, val, APPL_CTRL);
445 }
446 
447 static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
448 {
449 	struct tegra194_pcie *pcie = arg;
450 	struct dw_pcie *pci = &pcie->pci;
451 	u32 val, speed;
452 
453 	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
454 		PCI_EXP_LNKSTA_CLS;
455 	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
456 
457 	/* If EP doesn't advertise L1SS, just return */
458 	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
459 	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
460 		return IRQ_HANDLED;
461 
462 	/* Check if BME is set to '1' */
463 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
464 	if (val & PCI_COMMAND_MASTER) {
465 		ktime_t timeout;
466 
467 		/* 110us for both snoop and no-snoop */
468 		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
469 		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
470 		appl_writel(pcie, val, APPL_LTR_MSG_1);
471 
472 		/* Send LTR upstream */
473 		val = appl_readl(pcie, APPL_LTR_MSG_2);
474 		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
475 		appl_writel(pcie, val, APPL_LTR_MSG_2);
476 
477 		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
478 		for (;;) {
479 			val = appl_readl(pcie, APPL_LTR_MSG_2);
480 			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
481 				break;
482 			if (ktime_after(ktime_get(), timeout))
483 				break;
484 			usleep_range(1000, 1100);
485 		}
486 		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
487 			dev_err(pcie->dev, "Failed to send LTR message\n");
488 	}
489 
490 	return IRQ_HANDLED;
491 }
492 
/*
 * Hard IRQ handler for endpoint mode.  Handles hot-reset completion
 * and link-up notification directly; defers BME-changed handling to
 * the threaded handler (tegra_pcie_ep_irq_thread) by returning
 * IRQ_WAKE_THREAD.  Any status with no recognized cause is logged and
 * cleared wholesale.
 */
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra194_pcie *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;	/* set to 0 once any known cause is found */
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		/* Acknowledge exactly the bits we saw */
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		/* Bus Master Enable changed: finish work in thread context */
		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}
537 
538 static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
539 				     int size, u32 *val)
540 {
541 	/*
542 	 * This is an endpoint mode specific register happen to appear even
543 	 * when controller is operating in root port mode and system hangs
544 	 * when it is accessed with link being in ASPM-L1 state.
545 	 * So skip accessing it altogether
546 	 */
547 	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
548 		*val = 0x00000000;
549 		return PCIBIOS_SUCCESSFUL;
550 	}
551 
552 	return pci_generic_config_read(bus, devfn, where, size, val);
553 }
554 
555 static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
556 				     int size, u32 val)
557 {
558 	/*
559 	 * This is an endpoint mode specific register happen to appear even
560 	 * when controller is operating in root port mode and system hangs
561 	 * when it is accessed with link being in ASPM-L1 state.
562 	 * So skip accessing it altogether
563 	 */
564 	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
565 		return PCIBIOS_SUCCESSFUL;
566 
567 	return pci_generic_config_write(bus, devfn, where, size, val);
568 }
569 
/* PCI accessors for the root bus; installed in tegra194_pcie_host_init(). */
static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra194_pcie_rd_own_conf,
	.write = tegra194_pcie_wr_own_conf,
};
575 
576 #if defined(CONFIG_PCIEASPM)
/* DBI offset of the event counter control register, indexed by pcie->cid. */
static const u32 event_cntr_ctrl_offset[] = {
	0x1d8,
	0x1a8,
	0x1a8,
	0x1a8,
	0x1c4,
	0x1d8
};
585 
/* DBI offset of the event counter data register, indexed by pcie->cid. */
static const u32 event_cntr_data_offset[] = {
	0x1dc,
	0x1ac,
	0x1ac,
	0x1ac,
	0x1c8,
	0x1dc
};
594 
595 static void disable_aspm_l11(struct tegra194_pcie *pcie)
596 {
597 	u32 val;
598 
599 	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
600 	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
601 	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
602 }
603 
604 static void disable_aspm_l12(struct tegra194_pcie *pcie)
605 {
606 	u32 val;
607 
608 	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
609 	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
610 	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
611 }
612 
613 static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
614 {
615 	u32 val;
616 
617 	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
618 	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
619 	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
620 	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
621 	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
622 	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
623 	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
624 
625 	return val;
626 }
627 
/*
 * debugfs show callback (aspm_state_cnt file): print the number of
 * entries into each low-power link state, then clear and re-arm the
 * counters so each read reports the counts since the previous read.
 */
static int aspm_state_cnt(struct seq_file *s, void *data)
{
	/* s->private carries the device set by debugfs_create_devm_seqfile() */
	struct tegra194_pcie *pcie = (struct tegra194_pcie *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);

	return 0;
}
660 
/*
 * One-time ASPM setup for host mode: locate the L1SS capability,
 * enable the low-power event counters, and program the timing values
 * taken from device tree.
 */
static void init_host_aspm(struct tegra194_pcie *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	/* Cache the DBI offset of the L1SS capability register */
	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);		/* CM_RESTORE_TIME field, bits 15:8 */
	val |= (pcie->aspm_pwr_on_t << 19);	/* P_PWR_ON_VALUE field, bits 23:19 */
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
688 
/* Expose the "aspm_state_cnt" file under this controller's debugfs dir. */
static void init_debugfs(struct tegra194_pcie *pcie)
{
	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
694 #else
/* No-op stubs used when CONFIG_PCIEASPM is disabled. */
static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
699 #endif
700 
/*
 * Enable link-state (and optionally CDM-check) interrupt sources,
 * record the post-training link width for the bad-link workaround,
 * and turn on Link Bandwidth Management notifications.
 */
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		/* Surface CDM comparison/logic errors as interrupts */
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	/* Remember the freshly-trained width; apply_bad_link_workaround()
	 * compares against it later.
	 */
	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}
738 
739 static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
740 {
741 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
742 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
743 	u32 val;
744 
745 	/* Enable legacy interrupt generation */
746 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
747 	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
748 	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
749 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
750 
751 	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
752 	val |= APPL_INTR_EN_L1_8_INTX_EN;
753 	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
754 	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
755 	if (IS_ENABLED(CONFIG_PCIEAER))
756 		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
757 	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
758 }
759 
760 static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
761 {
762 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
763 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
764 	u32 val;
765 
766 	/* Enable MSI interrupt generation */
767 	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
768 	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
769 	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
770 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
771 }
772 
773 static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
774 {
775 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
776 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
777 
778 	/* Clear interrupt statuses before enabling interrupts */
779 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
780 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
781 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
782 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
783 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
784 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
785 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
786 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
787 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
788 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
789 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
790 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
791 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
792 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
793 	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
794 
795 	tegra_pcie_enable_system_interrupts(pp);
796 	tegra_pcie_enable_legacy_interrupts(pp);
797 	if (IS_ENABLED(CONFIG_PCI_MSI))
798 		tegra_pcie_enable_msi_interrupts(pp);
799 }
800 
/*
 * Program Gen3/Gen4 link equalization: seed each lane's DSP/USP TX
 * presets, then configure the preset request vector and feedback mode
 * for both data rates via the rate-shadow select mechanism.  The
 * read-modify-write ordering below follows the hardware programming
 * sequence; do not reorder.
 */
static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		/* Gen3 per-lane presets live in the secondary PCIe capability */
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		/* Gen4 per-lane presets live in the 16 GT/s extended capability */
		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	/* Select the Gen3 (8 GT/s) rate shadow registers */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	/* Gen3 EQ: preset request vector 0x3ff, feedback mode 0 */
	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	/* Switch the shadow select to the Gen4 (16 GT/s) rate */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	/* Gen4 EQ: preset request vector 0x360, feedback mode 0 */
	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	/* Restore the shadow select to the Gen3 rate */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
853 
/*
 * DesignWare host-init callback: installs the Tegra config accessors,
 * programs decode/CRS/link-width defaults in the DBI space, sets up
 * equalization presets and ASPM, applies optional fixups, and raises
 * the core clock to the Gen4 rate ahead of link training.
 *
 * Returns 0 (cannot fail).
 */
static int tegra194_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Route own-config accesses through the MSI-X doorbell workaround */
	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	/* Disable I/O space decoding */
	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	/* Enable prefetchable memory base/limit decoding */
	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable as 0xFFFF0001 response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	if (pcie->update_fc_fixup) {
		/* ACK/NAK timer fixup for flow-control update */
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	/* Start at the Gen4 core clock rate; adjusted later per link speed */
	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	return 0;
}
914 
915 static int tegra194_pcie_start_link(struct dw_pcie *pci)
916 {
917 	u32 val, offset, speed, tmp;
918 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
919 	struct pcie_port *pp = &pci->pp;
920 	bool retry = true;
921 
922 	if (pcie->mode == DW_PCIE_EP_TYPE) {
923 		enable_irq(pcie->pex_rst_irq);
924 		return 0;
925 	}
926 
927 retry_link:
928 	/* Assert RST */
929 	val = appl_readl(pcie, APPL_PINMUX);
930 	val &= ~APPL_PINMUX_PEX_RST;
931 	appl_writel(pcie, val, APPL_PINMUX);
932 
933 	usleep_range(100, 200);
934 
935 	/* Enable LTSSM */
936 	val = appl_readl(pcie, APPL_CTRL);
937 	val |= APPL_CTRL_LTSSM_EN;
938 	appl_writel(pcie, val, APPL_CTRL);
939 
940 	/* De-assert RST */
941 	val = appl_readl(pcie, APPL_PINMUX);
942 	val |= APPL_PINMUX_PEX_RST;
943 	appl_writel(pcie, val, APPL_PINMUX);
944 
945 	msleep(100);
946 
947 	if (dw_pcie_wait_for_link(pci)) {
948 		if (!retry)
949 			return 0;
950 		/*
951 		 * There are some endpoints which can't get the link up if
952 		 * root port has Data Link Feature (DLF) enabled.
953 		 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
954 		 * on Scaled Flow Control and DLF.
955 		 * So, need to confirm that is indeed the case here and attempt
956 		 * link up once again with DLF disabled.
957 		 */
958 		val = appl_readl(pcie, APPL_DEBUG);
959 		val &= APPL_DEBUG_LTSSM_STATE_MASK;
960 		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
961 		tmp = appl_readl(pcie, APPL_LINK_STATUS);
962 		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
963 		if (!(val == 0x11 && !tmp)) {
964 			/* Link is down for all good reasons */
965 			return 0;
966 		}
967 
968 		dev_info(pci->dev, "Link is down in DLL");
969 		dev_info(pci->dev, "Trying again with DLFE disabled\n");
970 		/* Disable LTSSM */
971 		val = appl_readl(pcie, APPL_CTRL);
972 		val &= ~APPL_CTRL_LTSSM_EN;
973 		appl_writel(pcie, val, APPL_CTRL);
974 
975 		reset_control_assert(pcie->core_rst);
976 		reset_control_deassert(pcie->core_rst);
977 
978 		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
979 		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
980 		val &= ~PCI_DLF_EXCHANGE_ENABLE;
981 		dw_pcie_writel_dbi(pci, offset, val);
982 
983 		tegra194_pcie_host_init(pp);
984 		dw_pcie_setup_rc(pp);
985 
986 		retry = false;
987 		goto retry_link;
988 	}
989 
990 	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
991 		PCI_EXP_LNKSTA_CLS;
992 	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
993 
994 	tegra_pcie_enable_interrupts(pp);
995 
996 	return 0;
997 }
998 
999 static int tegra194_pcie_link_up(struct dw_pcie *pci)
1000 {
1001 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
1002 	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
1003 
1004 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
1005 }
1006 
/*
 * tegra194_pcie_stop_link() - DWC stop_link callback
 *
 * Disables the PERST# GPIO IRQ (the one tegra194_pcie_start_link()
 * enables in EP mode) so PERST# transitions stop being serviced.
 */
static void tegra194_pcie_stop_link(struct dw_pcie *pci)
{
	struct tegra194_pcie *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}
1013 
/* DWC core callbacks, shared by RC and EP modes */
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra194_pcie_link_up,
	.start_link = tegra194_pcie_start_link,
	.stop_link = tegra194_pcie_stop_link,
};
1019 
/* DWC host (RC mode) callbacks */
static const struct dw_pcie_host_ops tegra194_pcie_host_ops = {
	.host_init = tegra194_pcie_host_init,
};
1023 
1024 static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
1025 {
1026 	unsigned int phy_count = pcie->phy_count;
1027 
1028 	while (phy_count--) {
1029 		phy_power_off(pcie->phys[phy_count]);
1030 		phy_exit(pcie->phys[phy_count]);
1031 	}
1032 }
1033 
1034 static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie)
1035 {
1036 	unsigned int i;
1037 	int ret;
1038 
1039 	for (i = 0; i < pcie->phy_count; i++) {
1040 		ret = phy_init(pcie->phys[i]);
1041 		if (ret < 0)
1042 			goto phy_power_off;
1043 
1044 		ret = phy_power_on(pcie->phys[i]);
1045 		if (ret < 0)
1046 			goto phy_exit;
1047 	}
1048 
1049 	return 0;
1050 
1051 phy_power_off:
1052 	while (i--) {
1053 		phy_power_off(pcie->phys[i]);
1054 phy_exit:
1055 		phy_exit(pcie->phys[i]);
1056 	}
1057 
1058 	return ret;
1059 }
1060 
/*
 * tegra194_pcie_parse_dt() - read Tegra194-specific device tree properties
 *
 * Mandatory properties ("dbi" region, "nvidia,aspm-cmrt-us", "num-lanes",
 * the controller ID cell of "nvidia,bpmp", "phy-names") fail the probe
 * when absent; the remaining properties are optional. EP-mode-only GPIOs
 * are looked up last. Returns 0 on success or a negative errno.
 */
static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	/* Power-on time and L0s latency are optional: log and carry on */
	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	/* Second cell of "nvidia,bpmp" is this controller's ID for BPMP-FW */
	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		/* Probe deferral is routine; don't log it at error level */
		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	/* REFCLK select GPIO is optional; treat lookup failure as absence */
	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}
1156 
1157 static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
1158 					  bool enable)
1159 {
1160 	struct mrq_uphy_response resp;
1161 	struct tegra_bpmp_message msg;
1162 	struct mrq_uphy_request req;
1163 
1164 	/* Controller-5 doesn't need to have its state set by BPMP-FW */
1165 	if (pcie->cid == 5)
1166 		return 0;
1167 
1168 	memset(&req, 0, sizeof(req));
1169 	memset(&resp, 0, sizeof(resp));
1170 
1171 	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
1172 	req.controller_state.pcie_controller = pcie->cid;
1173 	req.controller_state.enable = enable;
1174 
1175 	memset(&msg, 0, sizeof(msg));
1176 	msg.mrq = MRQ_UPHY;
1177 	msg.tx.data = &req;
1178 	msg.tx.size = sizeof(req);
1179 	msg.rx.data = &resp;
1180 	msg.rx.size = sizeof(resp);
1181 
1182 	return tegra_bpmp_transfer(pcie->bpmp, &msg);
1183 }
1184 
1185 static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
1186 					 bool enable)
1187 {
1188 	struct mrq_uphy_response resp;
1189 	struct tegra_bpmp_message msg;
1190 	struct mrq_uphy_request req;
1191 
1192 	memset(&req, 0, sizeof(req));
1193 	memset(&resp, 0, sizeof(resp));
1194 
1195 	if (enable) {
1196 		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
1197 		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
1198 	} else {
1199 		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
1200 		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
1201 	}
1202 
1203 	memset(&msg, 0, sizeof(msg));
1204 	msg.mrq = MRQ_UPHY;
1205 	msg.tx.data = &req;
1206 	msg.tx.size = sizeof(req);
1207 	msg.rx.data = &resp;
1208 	msg.rx.size = sizeof(resp);
1209 
1210 	return tegra_bpmp_transfer(pcie->bpmp, &msg);
1211 }
1212 
/*
 * tegra_pcie_downstream_dev_to_D0() - force slot-0 devices into D0
 *
 * Walks the root port's immediate child bus and transitions every
 * device in slot 0 to D0, logging (but not propagating) failures.
 */
static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * link doesn't go into L2 state with some of the endpoints with Tegra
	 * if they are not in D0 state. So, need to make sure that immediate
	 * downstream devices are in D0 state before sending PME_TurnOff to put
	 * link into L2 state.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	/* Find the bus directly below the root port */
	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}
1250 
1251 static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
1252 {
1253 	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
1254 	if (IS_ERR(pcie->slot_ctl_3v3)) {
1255 		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
1256 			return PTR_ERR(pcie->slot_ctl_3v3);
1257 
1258 		pcie->slot_ctl_3v3 = NULL;
1259 	}
1260 
1261 	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
1262 	if (IS_ERR(pcie->slot_ctl_12v)) {
1263 		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
1264 			return PTR_ERR(pcie->slot_ctl_12v);
1265 
1266 		pcie->slot_ctl_12v = NULL;
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie)
1273 {
1274 	int ret;
1275 
1276 	if (pcie->slot_ctl_3v3) {
1277 		ret = regulator_enable(pcie->slot_ctl_3v3);
1278 		if (ret < 0) {
1279 			dev_err(pcie->dev,
1280 				"Failed to enable 3.3V slot supply: %d\n", ret);
1281 			return ret;
1282 		}
1283 	}
1284 
1285 	if (pcie->slot_ctl_12v) {
1286 		ret = regulator_enable(pcie->slot_ctl_12v);
1287 		if (ret < 0) {
1288 			dev_err(pcie->dev,
1289 				"Failed to enable 12V slot supply: %d\n", ret);
1290 			goto fail_12v_enable;
1291 		}
1292 	}
1293 
1294 	/*
1295 	 * According to PCI Express Card Electromechanical Specification
1296 	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
1297 	 * should be a minimum of 100ms.
1298 	 */
1299 	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
1300 		msleep(100);
1301 
1302 	return 0;
1303 
1304 fail_12v_enable:
1305 	if (pcie->slot_ctl_3v3)
1306 		regulator_disable(pcie->slot_ctl_3v3);
1307 	return ret;
1308 }
1309 
/*
 * Disable the optional slot supplies, 12V first then 3.3V (reverse of
 * the enable order). Either pointer may be NULL when absent from DT.
 */
static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}
1317 
/*
 * tegra_pcie_config_controller() - power up and configure RC-mode hardware
 *
 * Enables the controller via BPMP-FW, slot/pad supplies, core clock and
 * APB reset, then the PHYs, and finally programs the application logic
 * (CFG/iATU-DMA base addresses, RP mode, cache attributes, optional
 * CLKREQ override) before releasing the core reset. On failure the
 * resources acquired so far are released in reverse order.
 *
 * @en_hw_hot_rst: also enable hardware hot-reset mode in APPL_CTRL.
 */
static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	/* With no CLKREQ routing, force the override so REFCLK stays on */
	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}
1414 
1415 static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
1416 {
1417 	int ret;
1418 
1419 	ret = reset_control_assert(pcie->core_rst);
1420 	if (ret)
1421 		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
1422 
1423 	tegra_pcie_disable_phy(pcie);
1424 
1425 	ret = reset_control_assert(pcie->core_apb_rst);
1426 	if (ret)
1427 		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
1428 
1429 	clk_disable_unprepare(pcie->core_clk);
1430 
1431 	ret = regulator_disable(pcie->pex_ctl_supply);
1432 	if (ret)
1433 		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
1434 
1435 	tegra_pcie_disable_slot_regulators(pcie);
1436 
1437 	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1438 	if (ret)
1439 		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
1440 			pcie->cid, ret);
1441 }
1442 
1443 static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
1444 {
1445 	struct dw_pcie *pci = &pcie->pci;
1446 	struct pcie_port *pp = &pci->pp;
1447 	int ret;
1448 
1449 	ret = tegra_pcie_config_controller(pcie, false);
1450 	if (ret < 0)
1451 		return ret;
1452 
1453 	pp->ops = &tegra194_pcie_host_ops;
1454 
1455 	ret = dw_pcie_host_init(pp);
1456 	if (ret < 0) {
1457 		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
1458 		goto fail_host_init;
1459 	}
1460 
1461 	return 0;
1462 
1463 fail_host_init:
1464 	tegra_pcie_unconfig_controller(pcie);
1465 	return ret;
1466 }
1467 
/*
 * tegra_pcie_try_link_l2() - request PME_Turn_Off and wait for L2 entry
 *
 * Returns 0 if the link is already down or enters L2 within
 * PME_ACK_TIMEOUT, otherwise the negative poll-timeout error.
 */
static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
{
	u32 val;

	if (!tegra194_pcie_link_up(&pcie->pci))
		return 0;

	/* Trigger transmission of the PME_Turn_Off message */
	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	/* Poll until the link reports L2 (or time out) */
	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
				 1, PME_ACK_TIMEOUT);
}
1483 
1484 static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
1485 {
1486 	u32 data;
1487 	int err;
1488 
1489 	if (!tegra194_pcie_link_up(&pcie->pci)) {
1490 		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
1491 		return;
1492 	}
1493 
1494 	/*
1495 	 * PCIe controller exits from L2 only if reset is applied, so
1496 	 * controller doesn't handle interrupts. But in cases where
1497 	 * L2 entry fails, PERST# is asserted which can trigger surprise
1498 	 * link down AER. However this function call happens in
1499 	 * suspend_noirq(), so AER interrupt will not be processed.
1500 	 * Disable all interrupts to avoid such a scenario.
1501 	 */
1502 	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
1503 
1504 	if (tegra_pcie_try_link_l2(pcie)) {
1505 		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1506 		/*
1507 		 * TX lane clock freq will reset to Gen1 only if link is in L2
1508 		 * or detect state.
1509 		 * So apply pex_rst to end point to force RP to go into detect
1510 		 * state
1511 		 */
1512 		data = appl_readl(pcie, APPL_PINMUX);
1513 		data &= ~APPL_PINMUX_PEX_RST;
1514 		appl_writel(pcie, data, APPL_PINMUX);
1515 
1516 		/*
1517 		 * Some cards do not go to detect state even after de-asserting
1518 		 * PERST#. So, de-assert LTSSM to bring link to detect state.
1519 		 */
1520 		data = readl(pcie->appl_base + APPL_CTRL);
1521 		data &= ~APPL_CTRL_LTSSM_EN;
1522 		writel(data, pcie->appl_base + APPL_CTRL);
1523 
1524 		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1525 						data,
1526 						((data &
1527 						APPL_DEBUG_LTSSM_STATE_MASK) >>
1528 						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1529 						LTSSM_STATE_PRE_DETECT,
1530 						1, LTSSM_TIMEOUT);
1531 		if (err)
1532 			dev_info(pcie->dev, "Link didn't go to detect state\n");
1533 	}
1534 	/*
1535 	 * DBI registers may not be accessible after this as PLL-E would be
1536 	 * down depending on how CLKREQ is pulled by end point
1537 	 */
1538 	data = appl_readl(pcie, APPL_PINMUX);
1539 	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1540 	/* Cut REFCLK to slot */
1541 	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1542 	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1543 	appl_writel(pcie, data, APPL_PINMUX);
1544 }
1545 
/*
 * tegra_pcie_deinit_controller() - full RC-mode teardown
 *
 * Wakes downstream devices to D0 (needed before L2 entry), removes the
 * host bridge, moves the link to L2 (or forces detect) and powers the
 * controller down. Inverse of tegra_pcie_init_controller().
 */
static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra194_pcie_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}
1553 
/*
 * tegra_pcie_config_rp() - RC-mode bring-up at probe time
 *
 * Takes a runtime-PM reference, selects the default pinctrl state and
 * initializes the controller/host bridge. Fails with -ENOMEDIUM when no
 * link comes up, otherwise creates the per-port debugfs directory.
 * Returns 0 on success or a negative errno with runtime PM rebalanced.
 */
static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize controller: %d\n", ret);
		goto fail_pm_get_sync;
	}

	/* No point keeping the port alive without a trained link */
	pcie->link_state = tegra194_pcie_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	/* debugfs directory named after the DT node's full path */
	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
1605 
/*
 * pex_ep_event_pex_rst_assert() - tear down EP mode on PERST# assertion
 *
 * Runs from the PERST# IRQ thread. Stops the LTSSM and waits for the
 * link to return to detect, then asserts the resets, powers down the
 * PHYs and clock, drops the runtime-PM reference and turns off the
 * UPHY PLL via BPMP-FW. A no-op when already disabled.
 */
static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* Wait for the LTSSM to settle in the pre-detect state */
	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				 LTSSM_STATE_PRE_DETECT,
				 1, LTSSM_TIMEOUT);
	if (ret)
		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);

	reset_control_assert(pcie->core_rst);

	tegra_pcie_disable_phy(pcie);

	reset_control_assert(pcie->core_apb_rst);

	clk_disable_unprepare(pcie->core_clk);

	pm_runtime_put_sync(pcie->dev);

	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);

	pcie->ep_state = EP_STATE_DISABLED;
	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
1644 
/*
 * pex_ep_event_pex_rst_deassert() - bring up EP mode on PERST# release
 *
 * Runs from the PERST# IRQ thread. Powers up the UPHY PLL, core clock,
 * APB reset and PHYs, programs the application logic for EP operation,
 * applies the DBI-side configuration, completes DWC endpoint init and
 * finally enables the LTSSM. On any failure the bring-up is unwound in
 * reverse order. A no-op when already enabled.
 */
static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
	if (ret) {
		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
		goto fail_pll_init;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	/* Point the application logic at the CFG and iATU/DMA regions */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	/* DT-requested fixup ("nvidia,update-fc-fixup"): set ACK/NAK timer bit */
	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);
	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	/* Point the MSI-X address-match window at the EP's MSI memory */
	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	pm_runtime_put_sync(dev);
}
1807 
1808 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
1809 {
1810 	struct tegra194_pcie *pcie = arg;
1811 
1812 	if (gpiod_get_value(pcie->pex_rst_gpiod))
1813 		pex_ep_event_pex_rst_assert(pcie);
1814 	else
1815 		pex_ep_event_pex_rst_deassert(pcie);
1816 
1817 	return IRQ_HANDLED;
1818 }
1819 
/*
 * tegra_pcie_ep_raise_legacy_irq() - pulse a legacy INTx interrupt
 *
 * Asserts APPL_LEGACY_INTX, holds it for 1-2 ms, then deasserts it.
 * Returns -EINVAL for any vector other than INTA.
 */
static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}
1831 
/*
 * tegra_pcie_ep_raise_msi_irq() - trigger an MSI via APPL_MSI_CTRL_1
 *
 * The control register carries one trigger bit per vector, so only
 * vectors 0-31 are valid; anything else returns -EINVAL.
 */
static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}
1841 
/*
 * tegra_pcie_ep_raise_msix_irq() - trigger an MSI-X interrupt
 *
 * Writes the vector number into ep->msi_mem, the region whose physical
 * address was programmed into the MSIX_ADDR_MATCH window during
 * pex_ep_event_pex_rst_deassert(). Always returns 0.
 */
static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}
1850 
1851 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1852 				   enum pci_epc_irq_type type,
1853 				   u16 interrupt_num)
1854 {
1855 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1856 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
1857 
1858 	switch (type) {
1859 	case PCI_EPC_IRQ_LEGACY:
1860 		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
1861 
1862 	case PCI_EPC_IRQ_MSI:
1863 		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
1864 
1865 	case PCI_EPC_IRQ_MSIX:
1866 		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
1867 
1868 	default:
1869 		dev_err(pci->dev, "Unknown IRQ type\n");
1870 		return -EPERM;
1871 	}
1872 
1873 	return 0;
1874 }
1875 
/* EP capabilities advertised to the EPC core */
static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	/* Only BAR 0 (fixed 64-bit, 1 MiB) is usable; BARs 2-5 are reserved */
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};
1885 
/* Return the static EP capability description above */
static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}
1891 
/* DWC endpoint (EP mode) callbacks */
static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};
1896 
1897 static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
1898 				struct platform_device *pdev)
1899 {
1900 	struct dw_pcie *pci = &pcie->pci;
1901 	struct device *dev = pcie->dev;
1902 	struct dw_pcie_ep *ep;
1903 	char *name;
1904 	int ret;
1905 
1906 	ep = &pci->ep;
1907 	ep->ops = &pcie_ep_ops;
1908 
1909 	ep->page_size = SZ_64K;
1910 
1911 	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
1912 	if (ret < 0) {
1913 		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
1914 			ret);
1915 		return ret;
1916 	}
1917 
1918 	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
1919 	if (ret < 0) {
1920 		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
1921 		return ret;
1922 	}
1923 	pcie->pex_rst_irq = (unsigned int)ret;
1924 
1925 	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
1926 			      pcie->cid);
1927 	if (!name) {
1928 		dev_err(dev, "Failed to create PERST IRQ string\n");
1929 		return -ENOMEM;
1930 	}
1931 
1932 	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
1933 
1934 	pcie->ep_state = EP_STATE_DISABLED;
1935 
1936 	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
1937 					tegra_pcie_ep_pex_rst_irq,
1938 					IRQF_TRIGGER_RISING |
1939 					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1940 					name, (void *)pcie);
1941 	if (ret < 0) {
1942 		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
1943 		return ret;
1944 	}
1945 
1946 	pm_runtime_enable(dev);
1947 
1948 	ret = dw_pcie_ep_init(ep);
1949 	if (ret) {
1950 		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
1951 			ret);
1952 		return ret;
1953 	}
1954 
1955 	return 0;
1956 }
1957 
1958 static int tegra194_pcie_probe(struct platform_device *pdev)
1959 {
1960 	const struct tegra194_pcie_of_data *data;
1961 	struct device *dev = &pdev->dev;
1962 	struct resource *atu_dma_res;
1963 	struct tegra194_pcie *pcie;
1964 	struct pcie_port *pp;
1965 	struct dw_pcie *pci;
1966 	struct phy **phys;
1967 	char *name;
1968 	int ret;
1969 	u32 i;
1970 
1971 	data = of_device_get_match_data(dev);
1972 
1973 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1974 	if (!pcie)
1975 		return -ENOMEM;
1976 
1977 	pci = &pcie->pci;
1978 	pci->dev = &pdev->dev;
1979 	pci->ops = &tegra_dw_pcie_ops;
1980 	pci->n_fts[0] = N_FTS_VAL;
1981 	pci->n_fts[1] = FTS_VAL;
1982 	pci->version = 0x490A;
1983 
1984 	pp = &pci->pp;
1985 	pp->num_vectors = MAX_MSI_IRQS;
1986 	pcie->dev = &pdev->dev;
1987 	pcie->mode = (enum dw_pcie_device_mode)data->mode;
1988 
1989 	ret = tegra194_pcie_parse_dt(pcie);
1990 	if (ret < 0) {
1991 		const char *level = KERN_ERR;
1992 
1993 		if (ret == -EPROBE_DEFER)
1994 			level = KERN_DEBUG;
1995 
1996 		dev_printk(level, dev,
1997 			   dev_fmt("Failed to parse device tree: %d\n"),
1998 			   ret);
1999 		return ret;
2000 	}
2001 
2002 	ret = tegra_pcie_get_slot_regulators(pcie);
2003 	if (ret < 0) {
2004 		const char *level = KERN_ERR;
2005 
2006 		if (ret == -EPROBE_DEFER)
2007 			level = KERN_DEBUG;
2008 
2009 		dev_printk(level, dev,
2010 			   dev_fmt("Failed to get slot regulators: %d\n"),
2011 			   ret);
2012 		return ret;
2013 	}
2014 
2015 	if (pcie->pex_refclk_sel_gpiod)
2016 		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
2017 
2018 	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
2019 	if (IS_ERR(pcie->pex_ctl_supply)) {
2020 		ret = PTR_ERR(pcie->pex_ctl_supply);
2021 		if (ret != -EPROBE_DEFER)
2022 			dev_err(dev, "Failed to get regulator: %ld\n",
2023 				PTR_ERR(pcie->pex_ctl_supply));
2024 		return ret;
2025 	}
2026 
2027 	pcie->core_clk = devm_clk_get(dev, "core");
2028 	if (IS_ERR(pcie->core_clk)) {
2029 		dev_err(dev, "Failed to get core clock: %ld\n",
2030 			PTR_ERR(pcie->core_clk));
2031 		return PTR_ERR(pcie->core_clk);
2032 	}
2033 
2034 	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2035 						      "appl");
2036 	if (!pcie->appl_res) {
2037 		dev_err(dev, "Failed to find \"appl\" region\n");
2038 		return -ENODEV;
2039 	}
2040 
2041 	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
2042 	if (IS_ERR(pcie->appl_base))
2043 		return PTR_ERR(pcie->appl_base);
2044 
2045 	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
2046 	if (IS_ERR(pcie->core_apb_rst)) {
2047 		dev_err(dev, "Failed to get APB reset: %ld\n",
2048 			PTR_ERR(pcie->core_apb_rst));
2049 		return PTR_ERR(pcie->core_apb_rst);
2050 	}
2051 
2052 	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
2053 	if (!phys)
2054 		return -ENOMEM;
2055 
2056 	for (i = 0; i < pcie->phy_count; i++) {
2057 		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
2058 		if (!name) {
2059 			dev_err(dev, "Failed to create P2U string\n");
2060 			return -ENOMEM;
2061 		}
2062 		phys[i] = devm_phy_get(dev, name);
2063 		kfree(name);
2064 		if (IS_ERR(phys[i])) {
2065 			ret = PTR_ERR(phys[i]);
2066 			if (ret != -EPROBE_DEFER)
2067 				dev_err(dev, "Failed to get PHY: %d\n", ret);
2068 			return ret;
2069 		}
2070 	}
2071 
2072 	pcie->phys = phys;
2073 
2074 	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2075 						   "atu_dma");
2076 	if (!atu_dma_res) {
2077 		dev_err(dev, "Failed to find \"atu_dma\" region\n");
2078 		return -ENODEV;
2079 	}
2080 	pcie->atu_dma_res = atu_dma_res;
2081 
2082 	pci->atu_size = resource_size(atu_dma_res);
2083 	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
2084 	if (IS_ERR(pci->atu_base))
2085 		return PTR_ERR(pci->atu_base);
2086 
2087 	pcie->core_rst = devm_reset_control_get(dev, "core");
2088 	if (IS_ERR(pcie->core_rst)) {
2089 		dev_err(dev, "Failed to get core reset: %ld\n",
2090 			PTR_ERR(pcie->core_rst));
2091 		return PTR_ERR(pcie->core_rst);
2092 	}
2093 
2094 	pp->irq = platform_get_irq_byname(pdev, "intr");
2095 	if (pp->irq < 0)
2096 		return pp->irq;
2097 
2098 	pcie->bpmp = tegra_bpmp_get(dev);
2099 	if (IS_ERR(pcie->bpmp))
2100 		return PTR_ERR(pcie->bpmp);
2101 
2102 	platform_set_drvdata(pdev, pcie);
2103 
2104 	switch (pcie->mode) {
2105 	case DW_PCIE_RC_TYPE:
2106 		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
2107 				       IRQF_SHARED, "tegra-pcie-intr", pcie);
2108 		if (ret) {
2109 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2110 				ret);
2111 			goto fail;
2112 		}
2113 
2114 		ret = tegra_pcie_config_rp(pcie);
2115 		if (ret && ret != -ENOMEDIUM)
2116 			goto fail;
2117 		else
2118 			return 0;
2119 		break;
2120 
2121 	case DW_PCIE_EP_TYPE:
2122 		ret = devm_request_threaded_irq(dev, pp->irq,
2123 						tegra_pcie_ep_hard_irq,
2124 						tegra_pcie_ep_irq_thread,
2125 						IRQF_SHARED | IRQF_ONESHOT,
2126 						"tegra-pcie-ep-intr", pcie);
2127 		if (ret) {
2128 			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2129 				ret);
2130 			goto fail;
2131 		}
2132 
2133 		ret = tegra_pcie_config_ep(pcie, pdev);
2134 		if (ret < 0)
2135 			goto fail;
2136 		break;
2137 
2138 	default:
2139 		dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
2140 	}
2141 
2142 fail:
2143 	tegra_bpmp_put(pcie->bpmp);
2144 	return ret;
2145 }
2146 
/*
 * Driver unbind: tear down a controller that was fully brought up.
 * NOTE(review): assumes link_state is only set once bring-up completed,
 * so there is nothing to undo when it is clear -- confirm against
 * tegra_pcie_config_rp(). Teardown order below mirrors bring-up and
 * should not be reordered.
 */
static int tegra194_pcie_remove(struct platform_device *pdev)
{
	struct tegra194_pcie *pcie = platform_get_drvdata(pdev);

	if (!pcie->link_state)
		return 0;

	debugfs_remove_recursive(pcie->debugfs);
	tegra_pcie_deinit_controller(pcie);
	/* Balance the runtime-PM get/enable done during probe/bring-up. */
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	/* Deselect the external REFCLK if the board has a selection mux. */
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);

	return 0;
}
2164 
2165 static int tegra194_pcie_suspend_late(struct device *dev)
2166 {
2167 	struct tegra194_pcie *pcie = dev_get_drvdata(dev);
2168 	u32 val;
2169 
2170 	if (!pcie->link_state)
2171 		return 0;
2172 
2173 	/* Enable HW_HOT_RST mode */
2174 	val = appl_readl(pcie, APPL_CTRL);
2175 	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2176 		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2177 	val |= APPL_CTRL_HW_HOT_RST_EN;
2178 	appl_writel(pcie, val, APPL_CTRL);
2179 
2180 	return 0;
2181 }
2182 
/*
 * suspend_noirq: quiesce the link and power down the controller.
 * Order matters: downstream devices are put back into D0 first, then
 * PME_Turn_Off is broadcast to put the link into L2, and only then is
 * the controller unconfigured. Do not reorder.
 */
static int tegra194_pcie_suspend_noirq(struct device *dev)
{
	struct tegra194_pcie *pcie = dev_get_drvdata(dev);

	/* Nothing to quiesce if the link never came up. */
	if (!pcie->link_state)
		return 0;

	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra194_pcie_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);

	return 0;
}
2196 
/*
 * resume_noirq: re-power and reinitialize the controller, then retrain
 * the link. Any failure after the controller has been configured rolls
 * back through the "fail_host_init" label so the hardware is left
 * unconfigured rather than half-initialized.
 */
static int tegra194_pcie_resume_noirq(struct device *dev)
{
	struct tegra194_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	/* If the link was never up before suspend, stay powered down. */
	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra194_pcie_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	/* Reprogram the DWC Root Complex registers lost over suspend. */
	dw_pcie_setup_rc(&pcie->pci.pp);

	ret = tegra194_pcie_start_link(&pcie->pci);
	if (ret < 0)
		goto fail_host_init;

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}
2227 
2228 static int tegra194_pcie_resume_early(struct device *dev)
2229 {
2230 	struct tegra194_pcie *pcie = dev_get_drvdata(dev);
2231 	u32 val;
2232 
2233 	if (pcie->mode == DW_PCIE_EP_TYPE) {
2234 		dev_err(dev, "Suspend is not supported in EP mode");
2235 		return -ENOTSUPP;
2236 	}
2237 
2238 	if (!pcie->link_state)
2239 		return 0;
2240 
2241 	/* Disable HW_HOT_RST mode */
2242 	val = appl_readl(pcie, APPL_CTRL);
2243 	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2244 		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2245 	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
2246 	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
2247 	val &= ~APPL_CTRL_HW_HOT_RST_EN;
2248 	appl_writel(pcie, val, APPL_CTRL);
2249 
2250 	return 0;
2251 }
2252 
/*
 * System shutdown/reboot hook: bring the link down cleanly so devices
 * are not left in a low-power state across a reboot. IRQs are disabled
 * before PME_Turn_Off so late interrupts cannot race the teardown.
 * Order matters; do not reorder.
 */
static void tegra194_pcie_shutdown(struct platform_device *pdev)
{
	struct tegra194_pcie *pcie = platform_get_drvdata(pdev);

	if (!pcie->link_state)
		return;

	debugfs_remove_recursive(pcie->debugfs);
	tegra_pcie_downstream_dev_to_D0(pcie);

	disable_irq(pcie->pci.pp.irq);
	/* The MSI IRQ only exists when MSI support is compiled in. */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		disable_irq(pcie->pci.pp.msi_irq);

	tegra194_pcie_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}
2270 
/* Match data for the Root Complex compatible string. */
static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};
2274 
/* Match data for the Endpoint compatible string. */
static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};
2278 
/* Device-tree compatibles; .data selects RC vs EP operating mode. */
static const struct of_device_id tegra194_pcie_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra194_pcie_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra194_pcie_ep_of_data,
	},
	{},
};
2290 
/*
 * System sleep callbacks. suspend_late/resume_early bracket the APPL
 * hot-reset programming; the noirq pair does the full power-down/up.
 */
static const struct dev_pm_ops tegra194_pcie_pm_ops = {
	.suspend_late = tegra194_pcie_suspend_late,
	.suspend_noirq = tegra194_pcie_suspend_noirq,
	.resume_noirq = tegra194_pcie_resume_noirq,
	.resume_early = tegra194_pcie_resume_early,
};
2297 
/* Platform driver glue and module metadata. */
static struct platform_driver tegra194_pcie_driver = {
	.probe = tegra194_pcie_probe,
	.remove = tegra194_pcie_remove,
	.shutdown = tegra194_pcie_shutdown,
	.driver = {
		.name	= "tegra194-pcie",
		.pm = &tegra194_pcie_pm_ops,
		.of_match_table = tegra194_pcie_of_match,
	},
};
module_platform_driver(tegra194_pcie_driver);

MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
2315