// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG					0x0
#define PCIE_CORE_CMD_STATUS_REG				0x4
#define     PCIE_CORE_CMD_IO_ACCESS_EN				BIT(0)
#define     PCIE_CORE_CMD_MEM_ACCESS_EN				BIT(1)
#define     PCIE_CORE_CMD_MEM_IO_REQ_EN				BIT(2)
#define PCIE_CORE_DEV_REV_REG					0x8
#define PCIE_CORE_PCIEXP_CAP					0xc0
#define PCIE_CORE_ERR_CAPCTL_REG				0x118
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX			BIT(5)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN			BIT(6)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK			BIT(7)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV			BIT(8)
#define     PCIE_CORE_INT_A_ASSERT_ENABLE			1
#define     PCIE_CORE_INT_B_ASSERT_ENABLE			2
#define     PCIE_CORE_INT_C_ASSERT_ENABLE			3
#define     PCIE_CORE_INT_D_ASSERT_ENABLE			4
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR				0x4000
#define PIO_CTRL				(PIO_BASE_ADDR + 0x0)
#define   PIO_CTRL_TYPE_MASK			GENMASK(3, 0)
#define   PIO_CTRL_ADDR_WIN_DISABLE		BIT(24)
#define PIO_STAT				(PIO_BASE_ADDR + 0x4)
#define   PIO_COMPLETION_STATUS_SHIFT		7
#define   PIO_COMPLETION_STATUS_MASK		GENMASK(9, 7)
#define   PIO_COMPLETION_STATUS_OK		0
#define   PIO_COMPLETION_STATUS_UR		1
#define   PIO_COMPLETION_STATUS_CRS		2
#define   PIO_COMPLETION_STATUS_CA		4
#define   PIO_NON_POSTED_REQ			BIT(10)
#define   PIO_ERR_STATUS			BIT(11)
#define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB			(PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA				(PIO_BASE_ADDR + 0x18)
#define PIO_START				(PIO_BASE_ADDR + 0x1c)
#define PIO_ISR					(PIO_BASE_ADDR + 0x20)
#define PIO_ISRM				(PIO_BASE_ADDR + 0x24)

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR			0x4800
#define PCIE_CORE_CTRL0_REG			(CONTROL_BASE_ADDR + 0x0)
#define     PCIE_GEN_SEL_MSK			0x3
#define     PCIE_GEN_SEL_SHIFT			0x0
#define     SPEED_GEN_1				0
#define     SPEED_GEN_2				1
#define     SPEED_GEN_3				2
#define     IS_RC_MSK				1
#define     IS_RC_SHIFT				2
#define     LANE_CNT_MSK			0x18
#define     LANE_CNT_SHIFT			0x3
#define     LANE_COUNT_1			(0 << LANE_CNT_SHIFT)
#define     LANE_COUNT_2			(1 << LANE_CNT_SHIFT)
#define     LANE_COUNT_4			(2 << LANE_CNT_SHIFT)
#define     LANE_COUNT_8			(3 << LANE_CNT_SHIFT)
#define     LINK_TRAINING_EN			BIT(6)
#define     LEGACY_INTA				BIT(28)
#define     LEGACY_INTB				BIT(29)
#define     LEGACY_INTC				BIT(30)
#define     LEGACY_INTD				BIT(31)
#define PCIE_CORE_CTRL1_REG			(CONTROL_BASE_ADDR + 0x4)
#define     HOT_RESET_GEN			BIT(0)
#define PCIE_CORE_CTRL2_REG			(CONTROL_BASE_ADDR + 0x8)
#define     PCIE_CORE_CTRL2_RESERVED		0x7
#define     PCIE_CORE_CTRL2_TD_ENABLE		BIT(4)
#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	BIT(5)
#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE	BIT(6)
#define     PCIE_CORE_CTRL2_MSI_ENABLE		BIT(10)
#define PCIE_CORE_REF_CLK_REG			(CONTROL_BASE_ADDR + 0x14)
#define     PCIE_CORE_REF_CLK_TX_ENABLE		BIT(1)
#define PCIE_MSG_LOG_REG			(CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG				(CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK			BIT(7)
#define PCIE_ISR0_MASK_REG			(CONTROL_BASE_ADDR + 0x44)
#define     PCIE_ISR0_MSI_INT_PENDING		BIT(24)
#define     PCIE_ISR0_INTX_ASSERT(val)		BIT(16 + (val))
#define     PCIE_ISR0_INTX_DEASSERT(val)	BIT(20 + (val))
#define     PCIE_ISR0_ALL_MASK			GENMASK(26, 0)
#define PCIE_ISR1_REG				(CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
#define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
#define     PCIE_ISR1_FLUSH			BIT(5)
#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
#define     PCIE_ISR1_ALL_MASK			GENMASK(11, 4)
#define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG			(CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG			(CONTROL_BASE_ADDR + 0x9C)

/* PCIe window configuration */
#define OB_WIN_BASE_ADDR			0x4c00
#define OB_WIN_BLOCK_SIZE			0x20
#define OB_WIN_COUNT				8
#define OB_WIN_REG_ADDR(win, offset)		(OB_WIN_BASE_ADDR + \
						 OB_WIN_BLOCK_SIZE * (win) + \
						 (offset))
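/*
 * For example, OB_WIN_REG_ADDR(2, 0x08) resolves to
 * 0x4c00 + 2 * 0x20 + 0x08 = 0x4c48, i.e. OB_WIN_REMAP_LS(2).
 */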
#define OB_WIN_MATCH_LS(win)			OB_WIN_REG_ADDR(win, 0x00)
#define     OB_WIN_ENABLE			BIT(0)
#define OB_WIN_MATCH_MS(win)			OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win)			OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win)			OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win)			OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win)			OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win)			OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS			(OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
#define     OB_WIN_FUNC_NUM_MASK		GENMASK(31, 24)
#define     OB_WIN_FUNC_NUM_SHIFT		24
#define     OB_WIN_FUNC_NUM_ENABLE		BIT(23)
#define     OB_WIN_BUS_NUM_BITS_MASK		GENMASK(22, 20)
#define     OB_WIN_BUS_NUM_BITS_SHIFT		20
#define     OB_WIN_MSG_CODE_ENABLE		BIT(22)
#define     OB_WIN_MSG_CODE_MASK		GENMASK(21, 14)
#define     OB_WIN_MSG_CODE_SHIFT		14
#define     OB_WIN_MSG_PAYLOAD_LEN		BIT(12)
#define     OB_WIN_ATTR_ENABLE			BIT(11)
#define     OB_WIN_ATTR_TC_MASK			GENMASK(10, 8)
#define     OB_WIN_ATTR_TC_SHIFT		8
#define     OB_WIN_ATTR_RELAXED			BIT(7)
#define     OB_WIN_ATTR_NOSNOOP			BIT(6)
#define     OB_WIN_ATTR_POISON			BIT(5)
#define     OB_WIN_ATTR_IDO			BIT(4)
#define     OB_WIN_TYPE_MASK			GENMASK(3, 0)
#define     OB_WIN_TYPE_SHIFT			0
#define     OB_WIN_TYPE_MEM			0x0
#define     OB_WIN_TYPE_IO			0x4
#define     OB_WIN_TYPE_CONFIG_TYPE0		0x8
#define     OB_WIN_TYPE_CONFIG_TYPE1		0x9
#define     OB_WIN_TYPE_MSG			0xc

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR				0x6000
#define CFG_REG					(LMI_BASE_ADDR + 0x0)
#define     LTSSM_SHIFT				24
#define     LTSSM_MASK				0x3f
#define     LTSSM_L0				0x10
#define     RC_BAR_CONFIG			0x300
#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR			0x18000
#define CTRL_CONFIG_REG				(CTRL_CORE_BASE_ADDR + 0x0)
#define     CTRL_MODE_SHIFT			0x0
#define     CTRL_MODE_MASK			0x1
#define     PCIE_CORE_MODE_DIRECT		0x0
#define     PCIE_CORE_MODE_COMMAND		0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR			0x1b000
#define HOST_CTRL_INT_STATUS_REG		(CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG			(CENTRAL_INT_BASE_ADDR + 0x4)
#define     PCIE_IRQ_CMDQ_INT			BIT(0)
#define     PCIE_IRQ_MSI_STATUS_INT		BIT(1)
#define     PCIE_IRQ_CMD_SENT_DONE		BIT(3)
#define     PCIE_IRQ_DMA_INT			BIT(4)
#define     PCIE_IRQ_IB_DXFERDONE		BIT(5)
#define     PCIE_IRQ_OB_DXFERDONE		BIT(6)
#define     PCIE_IRQ_OB_RXFERDONE		BIT(7)
#define     PCIE_IRQ_COMPQ_INT			BIT(12)
#define     PCIE_IRQ_DIR_RD_DDR_DET		BIT(13)
#define     PCIE_IRQ_DIR_WR_DDR_DET		BIT(14)
#define     PCIE_IRQ_CORE_INT			BIT(16)
#define     PCIE_IRQ_CORE_INT_PIO		BIT(17)
#define     PCIE_IRQ_DPMU_INT			BIT(18)
#define     PCIE_IRQ_PCIE_MIS_INT		BIT(19)
#define     PCIE_IRQ_MSI_INT1_DET		BIT(20)
#define     PCIE_IRQ_MSI_INT2_DET		BIT(21)
#define     PCIE_IRQ_RC_DBELL_DET		BIT(22)
#define     PCIE_IRQ_EP_STATUS			BIT(23)
#define     PCIE_IRQ_ALL_MASK			0xfff0fb
#define     PCIE_IRQ_ENABLE_INTS_MASK		PCIE_IRQ_CORE_INT

/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0			0x8
#define PCIE_CONFIG_RD_TYPE1			0x9
#define PCIE_CONFIG_WR_TYPE0			0xa
#define PCIE_CONFIG_WR_TYPE1			0xb

#define PIO_RETRY_CNT			750000 /* 1.5 s */
#define PIO_RETRY_DELAY			2 /* 2 us */
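/* advk_pcie_wait_pio() polls for at most PIO_RETRY_CNT * PIO_RETRY_DELAY = 1.5 s */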

#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000
#define RETRAIN_WAIT_MAX_RETRIES	10
#define RETRAIN_WAIT_USLEEP_US		2000

#define MSI_IRQ_NUM			32

#define CFG_RD_CRS_VAL			0xffff0001

struct advk_pcie {
	struct platform_device *pdev;
	void __iomem *base;
	struct {
		phys_addr_t match;
		phys_addr_t remap;
		phys_addr_t mask;
		u32 actions;
	} wins[OB_WIN_COUNT];
	u8 wins_count;
	struct irq_domain *irq_domain;
	struct irq_chip irq_chip;
	raw_spinlock_t irq_lock;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_inner_domain;
	struct irq_chip msi_bottom_irq_chip;
	struct irq_chip msi_irq_chip;
	struct msi_domain_info msi_domain_info;
	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
	struct mutex msi_used_lock;
	u16 msi_msg;
	int link_gen;
	struct pci_bridge_emul bridge;
	struct gpio_desc *reset_gpio;
	struct phy *phy;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
	writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
	return readl(pcie->base + reg);
}

static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
{
	return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
}
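
/*
 * Example: a 16-bit read of PCI_EXP_LNKSTA (offset 0x12 in the PCIe
 * capability) reads the aligned 32-bit register at offset 0x10 and
 * returns its upper half, i.e. advk_read16(pcie, PCIE_CORE_PCIEXP_CAP +
 * PCI_EXP_LNKSTA) equals advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + 0x10) >> 16.
 */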

static int advk_pcie_link_up(struct advk_pcie *pcie)
{
	u32 val, ltssm_state;

	val = advk_readl(pcie, CFG_REG);
	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
	return ltssm_state >= LTSSM_L0;
}

static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (advk_pcie_link_up(pcie))
			return 0;

		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
	size_t retries;

	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
		if (!advk_pcie_link_up(pcie))
			break;
		udelay(RETRAIN_WAIT_USLEEP_US);
	}
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
	u32 reg;

	if (!pcie->reset_gpio)
		return;

	/*
	 * As required by the PCI Express spec (PCI Express Base Specification,
	 * REV. 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset),
	 * a delay of at least 100ms after de-asserting the PERST# signal is
	 * needed before link training is enabled. So ensure that link training
	 * is disabled prior to de-asserting the PERST# signal, to fulfill that
	 * PCI Express spec requirement.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LINK_TRAINING_EN;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* 10ms delay is needed for some cards */
	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
{
	int ret, neg_gen;
	u32 reg;

	/* Setup link speed */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~PCIE_GEN_SEL_MSK;
	if (gen == 3)
		reg |= SPEED_GEN_3;
	else if (gen == 2)
		reg |= SPEED_GEN_2;
	else
		reg |= SPEED_GEN_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Enable link training. This is not needed in every call to this
	 * function, just once suffices, but it does not break anything either.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= LINK_TRAINING_EN;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Start link training immediately after enabling it.
	 * This solves problems for some buggy cards.
	 */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
	reg |= PCI_EXP_LNKCTL_RL;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);

	ret = advk_pcie_wait_for_link(pcie);
	if (ret)
		return ret;

	reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
	neg_gen = reg & PCI_EXP_LNKSTA_CLS;

	return neg_gen;
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	int neg_gen = -1, gen;

	/*
	 * Reset PCIe card via PERST# signal. Some cards are not detected
	 * during link training when they are in some non-initial state.
	 */
	advk_pcie_issue_perst(pcie);

	/*
	 * The PERST# signal could have been asserted by the pinctrl subsystem
	 * before the probe() callback was called, or issued explicitly by the
	 * reset GPIO handling in advk_pcie_issue_perst(), putting the endpoint
	 * into fundamental reset. As required by the PCI Express spec, a delay
	 * of at least 100ms after such a reset is needed before link training
	 * can start.
	 */
	msleep(PCI_PM_D3COLD_WAIT);

	/*
	 * Try link training at link gen specified by device tree property
	 * 'max-link-speed'. If this fails, iteratively train at lower gen.
	 */
	for (gen = pcie->link_gen; gen > 0; --gen) {
		neg_gen = advk_pcie_train_at_gen(pcie, gen);
		if (neg_gen > 0)
			break;
	}

	if (neg_gen < 0)
		goto err;

	/*
	 * After successful training if negotiated gen is lower than requested,
	 * train again on negotiated gen. This solves some stability issues for
	 * some buggy gen1 cards.
	 */
	if (neg_gen < gen) {
		gen = neg_gen;
		neg_gen = advk_pcie_train_at_gen(pcie, gen);
	}

	if (neg_gen == gen) {
		dev_info(dev, "link up at gen %i\n", gen);
		return;
	}

err:
	dev_err(dev, "link never came up\n");
}

/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
				 phys_addr_t match, phys_addr_t remap,
				 phys_addr_t mask, u32 actions)
{
	advk_writel(pcie, OB_WIN_ENABLE |
			  lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}
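
/*
 * A minimal usage sketch (addresses and size are illustrative only):
 * map a 64 MiB CPU region at 0xe8000000 1:1 onto the PCIe bus as
 * memory space:
 *
 *	advk_pcie_set_ob_win(pcie, 0, 0xe8000000, 0xe8000000,
 *			     ~(phys_addr_t)(SZ_64M - 1), OB_WIN_TYPE_MEM);
 *
 * The actual windows are derived from the host bridge resource list
 * in advk_pcie_probe().
 */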

static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
	advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
	u32 reg;
	int i;

	/* Enable TX */
	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
	reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

	/* Set to Direct mode */
	reg = advk_readl(pcie, CTRL_CONFIG_REG);
	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
	reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
	advk_writel(pcie, reg, CTRL_CONFIG_REG);

	/* Set PCI global control register to RC mode */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= (IS_RC_MSK << IS_RC_SHIFT);
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
	 * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
	 * id in high 16 bits. Updating this register changes readback value of
	 * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
	 */
	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
	advk_writel(pcie, reg, VENDOR_ID_REG);

	/* Set Advanced Error Capabilities and Control PF0 register */
	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
	advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

	/* Set PCIe Device Control register */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	reg &= ~PCI_EXP_DEVCTL_READRQ;
	reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */
	reg |= PCI_EXP_DEVCTL_READRQ_512B;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

	/* Program PCIe Control 2 to disable strict ordering */
	reg = PCIE_CORE_CTRL2_RESERVED |
		PCIE_CORE_CTRL2_TD_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Set lane X1 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LANE_CNT_MSK;
	reg |= LANE_COUNT_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* Enable MSI */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Disable All ISR0/1 Sources */
	reg = PCIE_ISR0_ALL_MASK;
	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);

	/* Unmask all MSIs */
	advk_writel(pcie, 0, PCIE_MSI_MASK_REG);

	/* Enable summary interrupt for GIC SPI source */
	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
	advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

	/*
	 * Enable AXI address window location generation:
	 * When it is enabled, the default outbound window
	 * configuration (Default User Field: 0xD0074CFC)
	 * is used for transparent address translation of
	 * outbound transactions. Thus, PCIe address windows
	 * are not required for transparent memory access
	 * when the default outbound window configuration is
	 * set for memory access.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/*
	 * Set memory access type in the Default User Field
	 * so that no PCIe address window needs to be
	 * configured for transparent memory access.
	 */
	advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

	/*
	 * Bypass the address window mapping for PIO:
	 * Since a PIO access already carries all required
	 * info over the AXI interface via the PIO registers,
	 * no address window is required.
	 */
	reg = advk_readl(pcie, PIO_CTRL);
	reg |= PIO_CTRL_ADDR_WIN_DISABLE;
	advk_writel(pcie, reg, PIO_CTRL);

	/*
	 * Configure PCIe address windows for non-memory or
	 * non-transparent access, as by default PCIe uses
	 * transparent memory access.
	 */
	for (i = 0; i < pcie->wins_count; i++)
		advk_pcie_set_ob_win(pcie, i,
				     pcie->wins[i].match, pcie->wins[i].remap,
				     pcie->wins[i].mask, pcie->wins[i].actions);

	/* Disable remaining PCIe outbound windows */
	for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	advk_pcie_train_link(pcie);

	/*
	 * FIXME: The following register update is suspicious. This register is
	 * applicable only when the PCI controller is configured for Endpoint
	 * mode, not as a Root Complex. But apparently when this code is
	 * removed, some cards stop working. This should be investigated and
	 * a comment explaining this should be put here.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
		PCIE_CORE_CMD_IO_ACCESS_EN |
		PCIE_CORE_CMD_MEM_IO_REQ_EN;
	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}

static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	unsigned int status;
	char *strcomp_status, *str_posted;

	reg = advk_readl(pcie, PIO_STAT);
	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
		PIO_COMPLETION_STATUS_SHIFT;

	/*
	 * According to the HW spec, the PIO status must be checked as follows:
	 * 1) even if COMPLETION_STATUS (bit 9:7) indicates success, the
	 *    Error Status (bit 11) still has to be checked; only when that
	 *    bit indicates no error did the operation actually succeed.
	 * 2) the value Unsupported Request (1) in COMPLETION_STATUS (bit 9:7)
	 *    is an error only for a PIO write; a PIO read is successful with
	 *    a read value of 0xFFFFFFFF.
	 * 3) the value Completion Retry Status (CRS) in COMPLETION_STATUS
	 *    (bit 9:7) is an error only for a PIO write; a PIO read is
	 *    successful with a read value of 0xFFFF0001.
	 * 4) the value Completer Abort (CA) in COMPLETION_STATUS (bit 9:7)
	 *    means an error for both PIO read and PIO write operations.
	 * 5) other values are reported as 'unknown'.
	 */
	switch (status) {
	case PIO_COMPLETION_STATUS_OK:
		if (reg & PIO_ERR_STATUS) {
			strcomp_status = "COMP_ERR";
			break;
		}
		/* Get the read result */
		if (val)
			*val = advk_readl(pcie, PIO_RD_DATA);
		/* No error */
		strcomp_status = NULL;
		break;
	case PIO_COMPLETION_STATUS_UR:
		strcomp_status = "UR";
		break;
	case PIO_COMPLETION_STATUS_CRS:
		if (allow_crs && val) {
			/* PCIe r4.0, sec 2.3.2, says:
			 * If CRS Software Visibility is enabled:
			 * For a Configuration Read Request that includes both
			 * bytes of the Vendor ID field of a device Function's
			 * Configuration Space Header, the Root Complex must
			 * complete the Request to the host by returning a
			 * read-data value of 0001h for the Vendor ID field and
			 * all '1's for any additional bytes included in the
			 * request.
			 *
			 * So CRS in this case is not an error status.
			 */
			*val = CFG_RD_CRS_VAL;
			strcomp_status = NULL;
			break;
		}
		/* PCIe r4.0, sec 2.3.2, says:
		 * If CRS Software Visibility is not enabled, the Root Complex
		 * must re-issue the Configuration Request as a new Request.
		 * If CRS Software Visibility is enabled: For a Configuration
		 * Write Request or for any other Configuration Read Request,
		 * the Root Complex must re-issue the Configuration Request as
		 * a new Request.
		 * A Root Complex implementation may choose to limit the number
		 * of Configuration Request/CRS Completion Status loops before
		 * determining that something is wrong with the target of the
		 * Request and taking appropriate action, e.g., complete the
		 * Request to the host as a failed transaction.
		 *
		 * To simplify implementation do not re-issue the Configuration
		 * Request and complete the Request as a failed transaction.
		 */
		strcomp_status = "CRS";
		break;
	case PIO_COMPLETION_STATUS_CA:
		strcomp_status = "CA";
		break;
	default:
		strcomp_status = "Unknown";
		break;
	}

	if (!strcomp_status)
		return 0;

	if (reg & PIO_NON_POSTED_REQ)
		str_posted = "Non-posted";
	else
		str_posted = "Posted";

	dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

	return -EFAULT;
}

static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	int i;

	for (i = 0; i < PIO_RETRY_CNT; i++) {
		u32 start, isr;

		start = advk_readl(pcie, PIO_START);
		isr = advk_readl(pcie, PIO_ISR);
		if (!start && isr)
			return 0;
		udelay(PIO_RETRY_DELAY);
	}

	dev_err(dev, "PIO read/write transfer time out\n");
	return -ETIMEDOUT;
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_EXP_SLTCTL:
		*value = PCI_EXP_SLTSTA_PDS << 16;
		return PCI_BRIDGE_EMUL_HANDLED;

	case PCI_EXP_RTCTL: {
		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
		*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
		*value |= PCI_EXP_RTCAP_CRSVIS << 16;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_RTSTA: {
		u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
		u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
		*value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_LNKCTL: {
		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
			~(PCI_EXP_LNKSTA_LT << 16);
		if (!advk_pcie_link_up(pcie))
			val |= (PCI_EXP_LNKSTA_LT << 16);
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_CAP_LIST_ID:
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_LNKCAP:
		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
		return PCI_BRIDGE_EMUL_HANDLED;
	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}

static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		break;

	case PCI_EXP_LNKCTL:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		if (new & PCI_EXP_LNKCTL_RL)
			advk_pcie_wait_for_retrain(pcie);
		break;

	case PCI_EXP_RTCTL: {
		/* Only mask/unmask PME interrupt */
		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
			~PCIE_MSG_PM_PME_MASK;
		if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
			val |= PCIE_MSG_PM_PME_MASK;
		advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
		break;
	}

	case PCI_EXP_RTSTA:
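		/*
		 * PCI_EXP_RTSTA_PME is bit 16 of the Root Status register;
		 * shifting it right by 9 maps it onto bit 7, which is
		 * PCIE_MSG_PM_PME_MASK, so writing it to PCIE_ISR0_REG
		 * clears the corresponding PME status bit.
		 */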
		new = (new & PCI_EXP_RTSTA_PME) >> 9;
		advk_writel(pcie, new, PCIE_ISR0_REG);
		break;

	default:
		break;
	}
}

static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
	.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
	struct pci_bridge_emul *bridge = &pcie->bridge;
	int ret;

	bridge->conf.vendor =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
	bridge->conf.device =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
	bridge->conf.class_revision =
		cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

	/* Support 32 bits I/O addressing */
	bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
	bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

	/* Support 64 bits memory pref */
	bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

	/* Support interrupt A for MSI feature */
	bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;

	bridge->has_pcie = true;
	bridge->data = pcie;
	bridge->ops = &advk_pci_bridge_emul_ops;

	/* PCIe config space can be initialized after pci_bridge_emul_init() */
	ret = pci_bridge_emul_init(bridge, 0);
	if (ret < 0)
		return ret;

	/* Indicate support for Completion Retry Status */
	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);

	return 0;
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
				  int devfn)
{
	if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
		return false;

	/*
	 * If the link goes down after we check for link-up, nothing bad
	 * happens but the config access times out.
	 */
	if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
		return false;

	return true;
}

static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	/*
	 * Trying to start a new PIO transfer when the previous one has not
	 * completed causes an External Abort on the CPU, which results in a
	 * kernel panic:
	 *
	 *     SError Interrupt on CPU0, code 0xbf000002 -- SError
	 *     Kernel panic - not syncing: Asynchronous SError Interrupt
	 *
	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
	 * by raw_spin_lock_irqsave() at the pci_lock_config() level to prevent
	 * concurrent calls. But because a PIO transfer may take about 1.5s
	 * when the link is down or the card is disconnected,
	 * advk_pcie_wait_pio() may give up before the transfer has completed.
	 *
	 * Some versions of ARM Trusted Firmware handle this External Abort at
	 * EL3 level and mask it to prevent the kernel panic. Relevant TF-A
	 * commit:
	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
	 */
	if (advk_readl(pcie, PIO_START)) {
		dev_err(dev, "Previous PIO read/write transfer is still running\n");
		return true;
	}

	return false;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 *val)
{
	struct advk_pcie *pcie = bus->sysdata;
	bool allow_crs;
	u32 reg;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn)) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_read(&pcie->bridge, where,
						 size, val);

	/*
	 * Completion Retry Status can be returned only when reading all 4
	 * bytes of the PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
	 * the CRSSVE flag on the Root Bridge is enabled.
	 */
	allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
		    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
		     PCI_EXP_RTCTL_CRSSVE);

	if (advk_pcie_pio_is_running(pcie)) {
		/*
		 * If it is possible return Completion Retry Status so caller
		 * tries to issue the request again instead of failing.
		 */
		if (allow_crs) {
			*val = CFG_RD_CRS_VAL;
			return PCIBIOS_SUCCESSFUL;
		}
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_RD_TYPE0;
	else
		reg |= PCIE_CONFIG_RD_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
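	/*
	 * PCIE_ECAM_OFFSET() encodes the bus number in bits [27:20], devfn
	 * in bits [19:12] and the register offset in bits [11:0]; e.g.
	 * bus 1, devfn 0, where 0x10 yields 0x00100010. ALIGN_DOWN() clears
	 * the low two bits because PIO transfers operate on aligned 32-bit
	 * words.
	 */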
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Program the data strobe */
	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

	/* Clear PIO DONE ISR and start the transfer */
	advk_writel(pcie, 1, PIO_ISR);
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0) {
		/*
		 * If it is possible return Completion Retry Status so caller
		 * tries to issue the request again instead of failing.
		 */
		if (allow_crs) {
			*val = CFG_RD_CRS_VAL;
			return PCIBIOS_SUCCESSFUL;
		}
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	/* Check PIO status and get the read result */
	ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
	if (ret < 0) {
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

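	/*
	 * PIO reads always return a full 32-bit word; extract the requested
	 * bytes, e.g. a 1-byte read at where & 3 == 1 returns
	 * (*val >> 8) & 0xff.
	 */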
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
				int where, int size, u32 val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	u32 data_strobe = 0x0;
	int offset;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_write(&pcie->bridge, where,
						  size, val);

	if (where % size)
		return PCIBIOS_SET_FAILED;

	if (advk_pcie_pio_is_running(pcie))
		return PCIBIOS_SET_FAILED;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_WR_TYPE0;
	else
		reg |= PCIE_CONFIG_WR_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Calculate the write strobe */
	offset      = where & 0x3;
	reg         = val << (8 * offset);
	data_strobe = GENMASK(size - 1, 0) << offset;
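	/*
	 * Example: a 2-byte write at where & 0x3 == 2 shifts the value into
	 * the upper half-word and yields data_strobe = 0b1100, so only the
	 * two upper byte lanes are written.
	 */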

	/* Program the data register */
	advk_writel(pcie, reg, PIO_WR_DATA);

	/* Program the data strobe */
	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

	/* Clear PIO DONE ISR and start the transfer */
	advk_writel(pcie, 1, PIO_ISR);
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0)
		return PCIBIOS_SET_FAILED;

	ret = advk_pcie_check_pio_status(pcie, false, NULL);
	if (ret < 0)
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops advk_pcie_ops = {
	.read = advk_pcie_rd_conf,
	.write = advk_pcie_wr_conf,
};

static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
					 struct msi_msg *msg)
{
	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);

	msg->address_lo = lower_32_bits(msi_msg);
	msg->address_hi = upper_32_bits(msi_msg);
	msg->data = data->irq;
}
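
/*
 * The MSI doorbell address handed to endpoints is the physical address of
 * pcie->msi_msg, which is also programmed into PCIE_MSI_ADDR_LOW/HIGH_REG
 * in advk_pcie_init_msi_irq_domain(). msg->data carries the Linux virq
 * number; advk_pcie_handle_msi() reads it back from PCIE_MSI_PAYLOAD_REG
 * and passes it to generic_handle_irq().
 */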

static int advk_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct advk_pcie *pcie = domain->host_data;
	int hwirq, i;

	mutex_lock(&pcie->msi_used_lock);
	hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
					   0, nr_irqs, 0);
	if (hwirq >= MSI_IRQ_NUM) {
		mutex_unlock(&pcie->msi_used_lock);
		return -ENOSPC;
	}

	bitmap_set(pcie->msi_used, hwirq, nr_irqs);
	mutex_unlock(&pcie->msi_used_lock);

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &pcie->msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);

	return hwirq;
}

static void advk_msi_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct advk_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->msi_used_lock);
	bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
	mutex_unlock(&pcie->msi_used_lock);
}

static const struct irq_domain_ops advk_msi_domain_ops = {
	.alloc = advk_msi_irq_domain_alloc,
	.free = advk_msi_irq_domain_free,
};

static void advk_pcie_irq_mask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void advk_pcie_irq_unmask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	advk_pcie_irq_mask(irq_get_irq_data(virq));
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct irq_chip *bottom_ic, *msi_ic;
	struct msi_domain_info *msi_di;
	phys_addr_t msi_msg_phys;

	mutex_init(&pcie->msi_used_lock);

	bottom_ic = &pcie->msi_bottom_irq_chip;

	bottom_ic->name = "MSI";
	bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
	bottom_ic->irq_set_affinity = advk_msi_set_affinity;

	msi_ic = &pcie->msi_irq_chip;
	msi_ic->name = "advk-MSI";

	msi_di = &pcie->msi_domain_info;
	msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		MSI_FLAG_MULTI_PCI_MSI;
	msi_di->chip = msi_ic;

	msi_msg_phys = virt_to_phys(&pcie->msi_msg);

	advk_writel(pcie, lower_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, upper_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_HIGH_REG);

	pcie->msi_inner_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_msi_domain_ops, pcie);
	if (!pcie->msi_inner_domain)
		return -ENOMEM;

	pcie->msi_domain =
		pci_msi_create_irq_domain(of_node_to_fwnode(node),
					  msi_di, pcie->msi_inner_domain);
	if (!pcie->msi_domain) {
		irq_domain_remove(pcie->msi_inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->msi_domain);
	irq_domain_remove(pcie->msi_inner_domain);
}

static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;
	int ret = 0;

	raw_spin_lock_init(&pcie->irq_lock);

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;

	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_mask_ack = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		ret = -ENOMEM;
		goto out_put_node;
	}

out_put_node:
	of_node_put(pcie_intc_node);
	return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}

static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;
	u16 msi_data;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ~msi_mask;

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
		generic_handle_irq(msi_data);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}

static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	if (!isr0_status && !isr1_status) {
		advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
		advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
		return;
	}

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		generic_handle_domain_irq(pcie->irq_domain, i);
	}
}

static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}

static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}

static int advk_pcie_enable_phy(struct advk_pcie *pcie)
{
	int ret;

	if (!pcie->phy)
		return 0;

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
	} else if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	return 0;
}

static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;

	pcie->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
		return PTR_ERR(pcie->phy);

	/* Old bindings miss the PHY handle */
	if (IS_ERR(pcie->phy)) {
		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
		pcie->phy = NULL;
		return 0;
	}

	ret = advk_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

	return ret;
}

static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	resource_list_for_each_entry(entry, &bridge->windows) {
		resource_size_t start = entry->res->start;
		resource_size_t size = resource_size(entry->res);
		unsigned long type = resource_type(entry->res);
		u64 win_size;

		/*
		 * Aardvark hardware also allows configuring a PCIe window
		 * for config type 0 and type 1 mapping, but the driver uses
		 * only PIO for issuing configuration transfers, which does
		 * not use the PCIe window configuration.
		 */
		if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
		    type != IORESOURCE_IO)
			continue;

		/*
		 * Skip transparent memory resources. Default outbound access
		 * configuration is set to transparent memory access so it
		 * does not need window configuration.
		 */
		if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
		    entry->offset == 0)
			continue;

		/*
		 * The n-th PCIe window is configured by the tuple (match,
		 * remap, mask): an access to address A uses this window if A
		 * matches 'match' under the given mask. So every PCIe window
		 * size must be a power of two and every start address must
		 * be aligned to the window size. The minimal size is 64 KiB
		 * because the lower 16 bits of the mask must be zero. The
		 * remapped address may only have bits set that are also set
		 * in the mask.
		 */
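		/*
		 * A worked example (illustrative addresses): a 96 MiB
		 * non-transparent region starting at 0xe8000000 is split by
		 * the loop below into a 64 MiB window at 0xe8000000 followed
		 * by a 32 MiB window at 0xec000000, since each piece must be
		 * a power-of-two size aligned to its start address.
		 */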
		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
			/* Calculate the largest aligned window size */
			win_size = (1ULL << (fls64(size)-1)) |
				   (start ? (1ULL << __ffs64(start)) : 0);
			win_size = 1ULL << __ffs64(win_size);
			if (win_size < 0x10000)
				break;

			dev_dbg(dev,
				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
				pcie->wins_count, (unsigned long long)start,
				(unsigned long long)start + win_size, type);

			if (type == IORESOURCE_IO) {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
			} else {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
				pcie->wins[pcie->wins_count].match = start;
			}
			pcie->wins[pcie->wins_count].remap = start - entry->offset;
			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
				break;

			start += win_size;
			size -= win_size;
			pcie->wins_count++;
		}

		if (size > 0) {
			dev_err(&pcie->pdev->dev,
				"Invalid PCIe region [0x%llx-0x%llx]\n",
				(unsigned long long)entry->res->start,
				(unsigned long long)entry->res->end + 1);
			return -EINVAL;
		}
	}

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
						       "reset-gpios", 0,
						       GPIOD_OUT_LOW,
						       "pcie1-reset");
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret == -ENOENT) {
			pcie->reset_gpio = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get reset-gpio: %i\n",
					ret);
			return ret;
		}
	}

	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	ret = advk_pcie_setup_phy(pcie);
	if (ret)
		return ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_sw_pci_bridge_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to register emulated root PCI bridge\n");
		return ret;
	}

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	bridge->sysdata = pcie;
	bridge->ops = &advk_pcie_ops;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}

static int advk_pcie_remove(struct platform_device *pdev)
{
	struct advk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int i;

	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	advk_pcie_remove_msi_irq_domain(pcie);
	advk_pcie_remove_irq_domain(pcie);

	/* Disable outbound address windows mapping */
	for (i = 0; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	return 0;
}

static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
	},
	.probe = advk_pcie_probe,
	.remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");