1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCIe driver for Marvell Armada 370 and Armada XP SoCs
4  *
5  * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/bitfield.h>
12 #include <linux/clk.h>
13 #include <linux/delay.h>
14 #include <linux/gpio/consumer.h>
15 #include <linux/init.h>
16 #include <linux/irqchip/chained_irq.h>
17 #include <linux/irqdomain.h>
18 #include <linux/mbus.h>
19 #include <linux/slab.h>
20 #include <linux/platform_device.h>
21 #include <linux/of_address.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_pci.h>
24 #include <linux/of_platform.h>
25 
26 #include "../pci.h"
27 #include "../pci-bridge-emul.h"
28 
29 /*
30  * PCIe unit register offsets.
31  */
32 #define PCIE_DEV_ID_OFF		0x0000
33 #define PCIE_CMD_OFF		0x0004
34 #define PCIE_DEV_REV_OFF	0x0008
35 #define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
36 #define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
37 #define PCIE_SSDEV_ID_OFF	0x002c
38 #define PCIE_CAP_PCIEXP		0x0060
39 #define PCIE_CAP_PCIERR_OFF	0x0100
40 #define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
41 #define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
42 #define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
43 #define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
44 #define PCIE_WIN5_CTRL_OFF	0x1880
45 #define PCIE_WIN5_BASE_OFF	0x1884
46 #define PCIE_WIN5_REMAP_OFF	0x188c
47 #define PCIE_CONF_ADDR_OFF	0x18f8
48 #define  PCIE_CONF_ADDR_EN		0x80000000
49 #define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
50 #define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
51 #define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
52 #define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
53 #define  PCIE_CONF_ADDR(bus, devfn, where) \
54 	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
55 	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
56 	 PCIE_CONF_ADDR_EN)
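/*
 * Illustrative example (derived purely from the macros above, not from any
 * datasheet): a config access to register 0x10 (BAR0) of bus 1, device 0,
 * function 0 encodes as
 *   PCIE_CONF_BUS(1) | PCIE_CONF_DEV(0) | PCIE_CONF_FUNC(0) |
 *   PCIE_CONF_REG(0x10) | PCIE_CONF_ADDR_EN
 *   = 0x00010000 | 0x00000000 | 0x00000000 | 0x00000010 | 0x80000000
 *   = 0x80010010,
 * which is written to PCIE_CONF_ADDR_OFF before reading or writing the data
 * at PCIE_CONF_DATA_OFF.
 */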
57 #define PCIE_CONF_DATA_OFF	0x18fc
58 #define PCIE_INT_CAUSE_OFF	0x1900
59 #define PCIE_INT_UNMASK_OFF	0x1910
60 #define  PCIE_INT_INTX(i)		BIT(24+i)
61 #define  PCIE_INT_PM_PME		BIT(28)
62 #define  PCIE_INT_ALL_MASK		GENMASK(31, 0)
63 #define PCIE_CTRL_OFF		0x1a00
64 #define  PCIE_CTRL_X1_MODE		0x0001
65 #define  PCIE_CTRL_RC_MODE		BIT(1)
66 #define  PCIE_CTRL_MASTER_HOT_RESET	BIT(24)
67 #define PCIE_STAT_OFF		0x1a04
68 #define  PCIE_STAT_BUS                  0xff00
69 #define  PCIE_STAT_DEV                  0x1f0000
70 #define  PCIE_STAT_LINK_DOWN		BIT(0)
71 #define PCIE_SSPL_OFF		0x1a0c
72 #define  PCIE_SSPL_VALUE_SHIFT		0
73 #define  PCIE_SSPL_VALUE_MASK		GENMASK(7, 0)
74 #define  PCIE_SSPL_SCALE_SHIFT		8
75 #define  PCIE_SSPL_SCALE_MASK		GENMASK(9, 8)
76 #define  PCIE_SSPL_ENABLE		BIT(16)
77 #define PCIE_RC_RTSTA		0x1a14
78 #define PCIE_DEBUG_CTRL         0x1a60
79 #define  PCIE_DEBUG_SOFT_RESET		BIT(20)
80 
81 struct mvebu_pcie_port;
82 
83 /* Structure representing all PCIe interfaces */
84 struct mvebu_pcie {
85 	struct platform_device *pdev;
86 	struct mvebu_pcie_port *ports;
87 	struct resource io;
88 	struct resource realio;
89 	struct resource mem;
90 	struct resource busn;
91 	int nports;
92 };
93 
94 struct mvebu_pcie_window {
95 	phys_addr_t base;
96 	phys_addr_t remap;
97 	size_t size;
98 };
99 
100 /* Structure representing one PCIe interface */
101 struct mvebu_pcie_port {
102 	char *name;
103 	void __iomem *base;
104 	u32 port;
105 	u32 lane;
106 	bool is_x4;
107 	int devfn;
108 	unsigned int mem_target;
109 	unsigned int mem_attr;
110 	unsigned int io_target;
111 	unsigned int io_attr;
112 	struct clk *clk;
113 	struct gpio_desc *reset_gpio;
114 	char *reset_name;
115 	struct pci_bridge_emul bridge;
116 	struct device_node *dn;
117 	struct mvebu_pcie *pcie;
118 	struct mvebu_pcie_window memwin;
119 	struct mvebu_pcie_window iowin;
120 	u32 saved_pcie_stat;
121 	struct resource regs;
122 	u8 slot_power_limit_value;
123 	u8 slot_power_limit_scale;
124 	struct irq_domain *intx_irq_domain;
125 	raw_spinlock_t irq_lock;
126 	int intx_irq;
127 };
128 
129 static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
130 {
131 	writel(val, port->base + reg);
132 }
133 
134 static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
135 {
136 	return readl(port->base + reg);
137 }
138 
139 static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
140 {
141 	return port->io_target != -1 && port->io_attr != -1;
142 }
143 
144 static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
145 {
146 	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
147 }
148 
149 static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
150 {
151 	return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
152 }
153 
154 static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
155 {
156 	u32 stat;
157 
158 	stat = mvebu_readl(port, PCIE_STAT_OFF);
159 	stat &= ~PCIE_STAT_BUS;
160 	stat |= nr << 8;
161 	mvebu_writel(port, stat, PCIE_STAT_OFF);
162 }
163 
164 static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
165 {
166 	u32 stat;
167 
168 	stat = mvebu_readl(port, PCIE_STAT_OFF);
169 	stat &= ~PCIE_STAT_DEV;
170 	stat |= nr << 16;
171 	mvebu_writel(port, stat, PCIE_STAT_OFF);
172 }
173 
174 static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
175 {
176 	int i;
177 
178 	mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
179 	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
180 
181 	for (i = 1; i < 3; i++) {
182 		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
183 		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
184 		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
185 	}
186 
187 	for (i = 0; i < 5; i++) {
188 		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
189 		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
190 		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
191 	}
192 
193 	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
194 	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
195 	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
196 }
197 
198 /*
199  * Set up the PCIe BARs and Address Decode Windows:
200  * BAR[0] -> internal registers (needed for MSI)
201  * BAR[1] -> covers all DRAM banks
202  * BAR[2] -> Disabled
203  * WIN[0-3] -> DRAM bank[0-3]
204  */
205 static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
206 {
207 	const struct mbus_dram_target_info *dram;
208 	u32 size;
209 	int i;
210 
211 	dram = mv_mbus_dram_info();
212 
213 	/* First, disable and clear BARs and windows. */
214 	mvebu_pcie_disable_wins(port);
215 
216 	/* Setup windows for DDR banks.  Count total DDR size on the fly. */
217 	size = 0;
218 	for (i = 0; i < dram->num_cs; i++) {
219 		const struct mbus_dram_window *cs = dram->cs + i;
220 
221 		mvebu_writel(port, cs->base & 0xffff0000,
222 			     PCIE_WIN04_BASE_OFF(i));
223 		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
224 		mvebu_writel(port,
225 			     ((cs->size - 1) & 0xffff0000) |
226 			     (cs->mbus_attr << 8) |
227 			     (dram->mbus_dram_target_id << 4) | 1,
228 			     PCIE_WIN04_CTRL_OFF(i));
229 
230 		size += cs->size;
231 	}
232 
233 	/* Round up 'size' to the nearest power of two. */
234 	if ((size & (size - 1)) != 0)
235 		size = 1 << fls(size);
236 
237 	/* Setup BAR[1] to all DRAM banks. */
238 	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
239 	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
240 	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
241 		     PCIE_BAR_CTRL_OFF(1));
242 
243 	/*
244 	 * Point BAR[0] to the device's internal registers.
245 	 */
246 	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
247 	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
248 }
249 
250 static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
251 {
252 	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;
253 
254 	/* Setup PCIe controller to Root Complex mode. */
255 	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
256 	ctrl |= PCIE_CTRL_RC_MODE;
257 	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
258 
259 	/*
260 	 * Set the Maximum Link Width to x1 or x4 in the Root Port's PCIe Link
261 	 * Capability register. The PCIe specification defines this register as
262 	 * read-only, but this mvebu controller implements it as read-write and
263 	 * it must be set to the number of SerDes PCIe lanes (1 or 4). If it is
264 	 * not set correctly, the link with the endpoint card is not established.
265 	 */
266 	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
267 	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
268 	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
269 	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
270 
271 	/* Disable Root Bridge I/O space, memory space and bus mastering. */
272 	cmd = mvebu_readl(port, PCIE_CMD_OFF);
273 	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
274 	mvebu_writel(port, cmd, PCIE_CMD_OFF);
275 
276 	/*
277 	 * Change the Class Code of the PCI Bridge device to PCI Bridge (0x6004)
278 	 * because the default value is Memory controller (0x5080).
279 	 *
280 	 * Note that this mvebu PCI Bridge does not have a compliant Type 1
281 	 * Configuration Space: the Header Type is reported as Type 0 and it
282 	 * has the format of a Type 0 config space.
283 	 *
284 	 * Moreover, the Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
285 	 * have the same format in Marvell's specification as in the PCIe
286 	 * specification, but their meaning is totally different and they do
287 	 * different things: they are aliased into internal mvebu registers
288 	 * (e.g. PCIE_BAR_LO_OFF) and must not be changed or reconfigured by
289 	 * PCI device drivers.
290 	 *
291 	 * Therefore this driver emulates the PCI Bridge, handling accesses to
292 	 * its configuration space via internal mvebu registers or an emulated
293 	 * configuration buffer. The driver accesses these registers directly
294 	 * for simplification, but they could also be accessed via the standard
295 	 * mvebu way of accessing PCI config space.
296 	 */
297 	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
298 	dev_rev &= ~0xffffff00;
299 	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
300 	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);
301 
302 	/* Point PCIe unit MBUS decode windows to DRAM space. */
303 	mvebu_pcie_setup_wins(port);
304 
305 	/*
306 	 * Program the Root Port to automatically send a Set_Slot_Power_Limit
307 	 * PCIe Message when the link changes status from DL_Down to DL_Up,
308 	 * provided a valid slot power limit was specified.
309 	 */
310 	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
311 	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
312 	if (port->slot_power_limit_value) {
313 		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
314 		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
315 		sspl |= PCIE_SSPL_ENABLE;
316 	}
317 	mvebu_writel(port, sspl, PCIE_SSPL_OFF);
318 
319 	/* Mask all interrupt sources. */
320 	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
321 
322 	/* Clear all interrupt causes. */
323 	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);
324 
325 	/* Check if "intx" interrupt was specified in DT. */
326 	if (port->intx_irq > 0)
327 		return;
328 
329 	/*
330 	 * Fallback code when the "intx" interrupt was not specified in DT:
331 	 * Unmask all legacy INTx interrupts as the driver does not provide a
332 	 * way to mask and unmask individual legacy INTx interrupts.
333 	 * Legacy INTx are reported via one shared GIC source, so the kernel
334 	 * cannot distinguish which individual legacy INTx was triggered.
335 	 * These interrupts are shared, so this should not cause any issue,
336 	 * just a performance penalty as every PCIe interrupt handler needs to
337 	 * be called when some interrupt is triggered.
338 	 */
339 	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
340 	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
341 		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
342 	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
343 }
344 
345 static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
346 						    struct pci_bus *bus,
347 						    int devfn);
348 
349 static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
350 				    int size, u32 *val)
351 {
352 	struct mvebu_pcie *pcie = bus->sysdata;
353 	struct mvebu_pcie_port *port;
354 	void __iomem *conf_data;
355 
356 	port = mvebu_pcie_find_port(pcie, bus, devfn);
357 	if (!port)
358 		return PCIBIOS_DEVICE_NOT_FOUND;
359 
360 	if (!mvebu_pcie_link_up(port))
361 		return PCIBIOS_DEVICE_NOT_FOUND;
362 
363 	conf_data = port->base + PCIE_CONF_DATA_OFF;
364 
365 	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
366 		     PCIE_CONF_ADDR_OFF);
367 
368 	switch (size) {
369 	case 1:
370 		*val = readb_relaxed(conf_data + (where & 3));
371 		break;
372 	case 2:
373 		*val = readw_relaxed(conf_data + (where & 2));
374 		break;
375 	case 4:
376 		*val = readl_relaxed(conf_data);
377 		break;
378 	default:
379 		return PCIBIOS_BAD_REGISTER_NUMBER;
380 	}
381 
382 	return PCIBIOS_SUCCESSFUL;
383 }
384 
385 static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
386 				    int where, int size, u32 val)
387 {
388 	struct mvebu_pcie *pcie = bus->sysdata;
389 	struct mvebu_pcie_port *port;
390 	void __iomem *conf_data;
391 
392 	port = mvebu_pcie_find_port(pcie, bus, devfn);
393 	if (!port)
394 		return PCIBIOS_DEVICE_NOT_FOUND;
395 
396 	if (!mvebu_pcie_link_up(port))
397 		return PCIBIOS_DEVICE_NOT_FOUND;
398 
399 	conf_data = port->base + PCIE_CONF_DATA_OFF;
400 
401 	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
402 		     PCIE_CONF_ADDR_OFF);
403 
404 	switch (size) {
405 	case 1:
406 		writeb(val, conf_data + (where & 3));
407 		break;
408 	case 2:
409 		writew(val, conf_data + (where & 2));
410 		break;
411 	case 4:
412 		writel(val, conf_data);
413 		break;
414 	default:
415 		return PCIBIOS_BAD_REGISTER_NUMBER;
416 	}
417 
418 	return PCIBIOS_SUCCESSFUL;
419 }
420 
421 static struct pci_ops mvebu_pcie_child_ops = {
422 	.read = mvebu_pcie_child_rd_conf,
423 	.write = mvebu_pcie_child_wr_conf,
424 };
425 
426 /*
427  * Remove windows, starting from the largest ones to the smallest
428  * ones.
429  */
430 static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
431 				   phys_addr_t base, size_t size)
432 {
433 	while (size) {
434 		size_t sz = 1 << (fls(size) - 1);
435 
436 		mvebu_mbus_del_window(base, sz);
437 		base += sz;
438 		size -= sz;
439 	}
440 }
441 
442 /*
443  * MBus windows can only have a power of two size, but PCI BARs do not
444  * have this constraint. Therefore, we have to split the PCI BAR into
445  * areas each having a power of two size. We start from the largest
446  * one (i.e. the highest order bit set in the size).
447  */
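/*
 * For example (illustrative only): a 96 MB region is emitted as a 64 MB MBus
 * window followed by a 32 MB one; mvebu_pcie_del_windows() above walks the
 * same largest-first split when tearing the windows down again.
 */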
448 static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
449 				   unsigned int target, unsigned int attribute,
450 				   phys_addr_t base, size_t size,
451 				   phys_addr_t remap)
452 {
453 	size_t size_mapped = 0;
454 
455 	while (size) {
456 		size_t sz = 1 << (fls(size) - 1);
457 		int ret;
458 
459 		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
460 							sz, remap);
461 		if (ret) {
462 			phys_addr_t end = base + sz - 1;
463 
464 			dev_err(&port->pcie->pdev->dev,
465 				"Could not create MBus window at [mem %pa-%pa]: %d\n",
466 				&base, &end, ret);
467 			mvebu_pcie_del_windows(port, base - size_mapped,
468 					       size_mapped);
469 			return ret;
470 		}
471 
472 		size -= sz;
473 		size_mapped += sz;
474 		base += sz;
475 		if (remap != MVEBU_MBUS_NO_REMAP)
476 			remap += sz;
477 	}
478 
479 	return 0;
480 }
481 
482 static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
483 				  unsigned int target, unsigned int attribute,
484 				  const struct mvebu_pcie_window *desired,
485 				  struct mvebu_pcie_window *cur)
486 {
487 	int ret;
488 
489 	if (desired->base == cur->base && desired->remap == cur->remap &&
490 	    desired->size == cur->size)
491 		return 0;
492 
493 	if (cur->size != 0) {
494 		mvebu_pcie_del_windows(port, cur->base, cur->size);
495 		cur->size = 0;
496 		cur->base = 0;
497 
498 		/*
499 		 * If something tries to change the window while it is enabled
500 		 * the change will not be done atomically. That would be
501 		 * difficult to do in the general case.
502 		 */
503 	}
504 
505 	if (desired->size == 0)
506 		return 0;
507 
508 	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
509 				     desired->size, desired->remap);
510 	if (ret) {
511 		cur->size = 0;
512 		cur->base = 0;
513 		return ret;
514 	}
515 
516 	*cur = *desired;
517 	return 0;
518 }
519 
520 static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
521 {
522 	struct mvebu_pcie_window desired = {};
523 	struct pci_bridge_emul_conf *conf = &port->bridge.conf;
524 
525 	/* Are the new iobase/iolimit values invalid? */
526 	if (conf->iolimit < conf->iobase ||
527 	    le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
528 		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
529 					     &desired, &port->iowin);
530 
531 	/*
532 	 * We read the PCI-to-PCI bridge emulated registers, and
533 	 * calculate the base address and size of the address decoding
534 	 * window to set up, according to the PCI-to-PCI bridge
535 	 * specification. iobase is the bus address, port->iowin.base
536 	 * is the CPU address.
537 	 */
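	/*
	 * Worked example with hypothetical register values: iobase = 0x31,
	 * iobaseupper = 0x0001, iolimit = 0x41, iolimitupper = 0x0001 describe
	 * the bus range 0x13000..0x14fff, so desired.remap = 0x13000,
	 * desired.size = 0x2000 and desired.base = pcie->io.start + 0x13000.
	 */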
538 	desired.remap = ((conf->iobase & 0xF0) << 8) |
539 			(le16_to_cpu(conf->iobaseupper) << 16);
540 	desired.base = port->pcie->io.start + desired.remap;
541 	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
542 			 (le16_to_cpu(conf->iolimitupper) << 16)) -
543 			desired.remap) +
544 		       1;
545 
546 	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
547 				     &port->iowin);
548 }
549 
550 static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
551 {
552 	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
553 	struct pci_bridge_emul_conf *conf = &port->bridge.conf;
554 
555 	/* Are the new membase/memlimit values invalid? */
556 	if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
557 		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
558 					     &desired, &port->memwin);
559 
560 	/*
561 	 * We read the PCI-to-PCI bridge emulated registers, and
562 	 * calculate the base address and size of the address decoding
563 	 * window to setup, according to the PCI-to-PCI bridge
564 	 * specifications.
565 	 */
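	/*
	 * Worked example with hypothetical register values: membase = 0xe000
	 * and memlimit = 0xe7f0 describe the range 0xe0000000..0xe7ffffff, so
	 * desired.base = 0xe0000000 and desired.size = 0x08000000 (128 MB).
	 */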
566 	desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
567 	desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
568 		       desired.base + 1;
569 
570 	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
571 				     &port->memwin);
572 }
573 
574 static pci_bridge_emul_read_status_t
575 mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
576 				     int reg, u32 *value)
577 {
578 	struct mvebu_pcie_port *port = bridge->data;
579 
580 	switch (reg) {
581 	case PCI_COMMAND:
582 		*value = mvebu_readl(port, PCIE_CMD_OFF);
583 		break;
584 
585 	case PCI_PRIMARY_BUS: {
586 		/*
587 		 * Of the whole 32-bit register, only the Secondary Bus Number
588 		 * (the mvebu local bus number) is read from HW.
589 		 * The other bits are retrieved only from the emulated config buffer.
590 		 */
591 		__le32 *cfgspace = (__le32 *)&bridge->conf;
592 		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
593 		val &= ~0xff00;
594 		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
595 		*value = val;
596 		break;
597 	}
598 
599 	case PCI_INTERRUPT_LINE: {
600 		/*
601 		 * Of the whole 32-bit register, only one bit is read from HW:
602 		 * PCI_BRIDGE_CTL_BUS_RESET.
603 		 * The other bits are retrieved only from the emulated config buffer.
604 		 */
605 		__le32 *cfgspace = (__le32 *)&bridge->conf;
606 		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
607 		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
608 			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
609 		else
610 			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
611 		*value = val;
612 		break;
613 	}
614 
615 	default:
616 		return PCI_BRIDGE_EMUL_NOT_HANDLED;
617 	}
618 
619 	return PCI_BRIDGE_EMUL_HANDLED;
620 }
621 
622 static pci_bridge_emul_read_status_t
623 mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
624 				     int reg, u32 *value)
625 {
626 	struct mvebu_pcie_port *port = bridge->data;
627 
628 	switch (reg) {
629 	case PCI_EXP_DEVCAP:
630 		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
631 		break;
632 
633 	case PCI_EXP_DEVCTL:
634 		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
635 		break;
636 
637 	case PCI_EXP_LNKCAP:
638 		/*
639 		 * PCIe requires that the Clock Power Management capability bit
640 		 * is hard-wired to zero for downstream ports, but the HW returns 1.
641 		 * Additionally, set the Data Link Layer Link Active Reporting
642 		 * Capable bit, as DL_Active indication is provided too.
643 		 */
644 		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
645 			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
646 		break;
647 
648 	case PCI_EXP_LNKCTL:
649 		/* DL_Active indication is provided via PCIE_STAT_OFF */
650 		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
651 			 (mvebu_pcie_link_up(port) ?
652 			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
653 		break;
654 
655 	case PCI_EXP_SLTCTL: {
656 		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
657 		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
658 		u32 val = 0;
659 		/*
660 		 * When the slot power limit was not specified in DT, the
661 		 * ASPL_DISABLE bit is stored only in the emulated config space.
662 		 * Otherwise reflect the status of the PCIE_SSPL_ENABLE bit in HW.
663 		 */
664 		if (!port->slot_power_limit_value)
665 			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
666 		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
667 			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
668 		/* This callback is 32-bit; the high 16 bits hold the Slot Status. */
669 		val |= slotsta << 16;
670 		*value = val;
671 		break;
672 	}
673 
674 	case PCI_EXP_RTSTA:
675 		*value = mvebu_readl(port, PCIE_RC_RTSTA);
676 		break;
677 
678 	case PCI_EXP_DEVCAP2:
679 		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
680 		break;
681 
682 	case PCI_EXP_DEVCTL2:
683 		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
684 		break;
685 
686 	case PCI_EXP_LNKCTL2:
687 		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
688 		break;
689 
690 	default:
691 		return PCI_BRIDGE_EMUL_NOT_HANDLED;
692 	}
693 
694 	return PCI_BRIDGE_EMUL_HANDLED;
695 }
696 
697 static pci_bridge_emul_read_status_t
698 mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
699 				    int reg, u32 *value)
700 {
701 	struct mvebu_pcie_port *port = bridge->data;
702 
703 	switch (reg) {
704 	case 0:
705 	case PCI_ERR_UNCOR_STATUS:
706 	case PCI_ERR_UNCOR_MASK:
707 	case PCI_ERR_UNCOR_SEVER:
708 	case PCI_ERR_COR_STATUS:
709 	case PCI_ERR_COR_MASK:
710 	case PCI_ERR_CAP:
711 	case PCI_ERR_HEADER_LOG+0:
712 	case PCI_ERR_HEADER_LOG+4:
713 	case PCI_ERR_HEADER_LOG+8:
714 	case PCI_ERR_HEADER_LOG+12:
715 	case PCI_ERR_ROOT_COMMAND:
716 	case PCI_ERR_ROOT_STATUS:
717 	case PCI_ERR_ROOT_ERR_SRC:
718 		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
719 		break;
720 
721 	default:
722 		return PCI_BRIDGE_EMUL_NOT_HANDLED;
723 	}
724 
725 	return PCI_BRIDGE_EMUL_HANDLED;
726 }
727 
728 static void
729 mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
730 				      int reg, u32 old, u32 new, u32 mask)
731 {
732 	struct mvebu_pcie_port *port = bridge->data;
733 	struct pci_bridge_emul_conf *conf = &bridge->conf;
734 
735 	switch (reg) {
736 	case PCI_COMMAND:
737 		mvebu_writel(port, new, PCIE_CMD_OFF);
738 		break;
739 
740 	case PCI_IO_BASE:
741 		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
742 		    mvebu_pcie_handle_iobase_change(port)) {
743 			/* On error disable IO range */
744 			conf->iobase &= ~0xf0;
745 			conf->iolimit &= ~0xf0;
746 			conf->iobase |= 0xf0;
747 			conf->iobaseupper = cpu_to_le16(0x0000);
748 			conf->iolimitupper = cpu_to_le16(0x0000);
749 		}
750 		break;
751 
752 	case PCI_MEMORY_BASE:
753 		if (mvebu_pcie_handle_membase_change(port)) {
754 			/* On error disable mem range */
755 			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
756 			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
757 			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
758 		}
759 		break;
760 
761 	case PCI_IO_BASE_UPPER16:
762 		if (mvebu_has_ioport(port) &&
763 		    mvebu_pcie_handle_iobase_change(port)) {
764 			/* On error disable IO range */
765 			conf->iobase &= ~0xf0;
766 			conf->iolimit &= ~0xf0;
767 			conf->iobase |= 0xf0;
768 			conf->iobaseupper = cpu_to_le16(0x0000);
769 			conf->iolimitupper = cpu_to_le16(0x0000);
770 		}
771 		break;
772 
773 	case PCI_PRIMARY_BUS:
774 		if (mask & 0xff00)
775 			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
776 		break;
777 
778 	case PCI_INTERRUPT_LINE:
779 		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
780 			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
781 			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
782 				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
783 			else
784 				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
785 			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
786 		}
787 		break;
788 
789 	default:
790 		break;
791 	}
792 }
793 
794 static void
795 mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
796 				      int reg, u32 old, u32 new, u32 mask)
797 {
798 	struct mvebu_pcie_port *port = bridge->data;
799 
800 	switch (reg) {
801 	case PCI_EXP_DEVCTL:
802 		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
803 		break;
804 
805 	case PCI_EXP_LNKCTL:
806 		/*
807 		 * PCIe requires that the Enable Clock Power Management bit
808 		 * is hard-wired to zero for downstream ports, but the HW
809 		 * allows changing it.
810 		 */
811 		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
812 
813 		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
814 		break;
815 
816 	case PCI_EXP_SLTCTL:
817 		/*
818 		 * Allow changing the PCIE_SSPL_ENABLE bit only when a slot power
819 		 * limit was specified in DT and configured into HW.
820 		 */
821 		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
822 		    port->slot_power_limit_value) {
823 			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
824 			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
825 				sspl &= ~PCIE_SSPL_ENABLE;
826 			else
827 				sspl |= PCIE_SSPL_ENABLE;
828 			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
829 		}
830 		break;
831 
832 	case PCI_EXP_RTSTA:
833 		/*
834 		 * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
835 		 * is read-only and can be cleared only by writing 0b to the
836 		 * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
837 		 * clear PME via Interrupt Cause.
838 		 */
839 		if (new & PCI_EXP_RTSTA_PME)
840 			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
841 		break;
842 
843 	case PCI_EXP_DEVCTL2:
844 		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
845 		break;
846 
847 	case PCI_EXP_LNKCTL2:
848 		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
849 		break;
850 
851 	default:
852 		break;
853 	}
854 }
855 
856 static void
857 mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
858 				     int reg, u32 old, u32 new, u32 mask)
859 {
860 	struct mvebu_pcie_port *port = bridge->data;
861 
862 	switch (reg) {
863 	/* These are W1C registers, so clear other bits */
864 	case PCI_ERR_UNCOR_STATUS:
865 	case PCI_ERR_COR_STATUS:
866 	case PCI_ERR_ROOT_STATUS:
867 		new &= mask;
868 		fallthrough;
869 
870 	case PCI_ERR_UNCOR_MASK:
871 	case PCI_ERR_UNCOR_SEVER:
872 	case PCI_ERR_COR_MASK:
873 	case PCI_ERR_CAP:
874 	case PCI_ERR_HEADER_LOG+0:
875 	case PCI_ERR_HEADER_LOG+4:
876 	case PCI_ERR_HEADER_LOG+8:
877 	case PCI_ERR_HEADER_LOG+12:
878 	case PCI_ERR_ROOT_COMMAND:
879 	case PCI_ERR_ROOT_ERR_SRC:
880 		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
881 		break;
882 
883 	default:
884 		break;
885 	}
886 }
887 
888 static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
889 	.read_base = mvebu_pci_bridge_emul_base_conf_read,
890 	.write_base = mvebu_pci_bridge_emul_base_conf_write,
891 	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
892 	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
893 	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
894 	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
895 };
896 
897 /*
898  * Initialize the configuration space of the PCI-to-PCI bridge
899  * associated with the given PCIe interface.
900  */
901 static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
902 {
903 	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
904 	struct pci_bridge_emul *bridge = &port->bridge;
905 	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
906 	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
907 	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
908 	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
909 	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);
910 
911 	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
912 	bridge->conf.device = cpu_to_le16(dev_id >> 16);
913 	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);
914 
915 	if (mvebu_has_ioport(port)) {
916 		/* We support 32 bits I/O addressing */
917 		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
918 		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
919 	} else {
920 		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
921 	}
922 
923 	/*
924 	 * Older mvebu hardware provides the PCIe Capability structure only in
925 	 * version 1; newer hardware provides it in version 2.
926 	 * Enable slot support, which is emulated.
927 	 */
928 	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);
929 
930 	/*
931 	 * Set the Presence Detect State bit permanently as there is no support
932 	 * for unplugging a PCIe card from the slot. Assume that a PCIe card is
933 	 * always connected to the slot.
934 	 *
935 	 * Set the physical slot number to port+1 as mvebu ports are indexed
936 	 * from zero and the value zero is reserved for ports integrated into
937 	 * the same silicon as the Root Port, which is not the mvebu case.
938 	 *
939 	 * Also set correct slot power limit.
940 	 */
941 	bridge->pcie_conf.slotcap = cpu_to_le32(
942 		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
943 		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
944 		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
945 	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
946 
947 	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
948 	bridge->subsystem_id = ssdev_id >> 16;
949 	bridge->has_pcie = true;
950 	bridge->pcie_start = PCIE_CAP_PCIEXP;
951 	bridge->data = port;
952 	bridge->ops = &mvebu_pci_bridge_emul_ops;
953 
954 	return pci_bridge_emul_init(bridge, bridge_flags);
955 }
956 
957 static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
958 {
959 	return sys->private_data;
960 }
961 
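/*
 * Find the port owning the given bus/devfn: devices on the emulated bus 0
 * are matched by devfn, devices on child buses by the secondary..subordinate
 * bus range programmed into the emulated bridge.
 */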
962 static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
963 						    struct pci_bus *bus,
964 						    int devfn)
965 {
966 	int i;
967 
968 	for (i = 0; i < pcie->nports; i++) {
969 		struct mvebu_pcie_port *port = &pcie->ports[i];
970 
971 		if (!port->base)
972 			continue;
973 
974 		if (bus->number == 0 && port->devfn == devfn)
975 			return port;
976 		if (bus->number != 0 &&
977 		    bus->number >= port->bridge.conf.secondary_bus &&
978 		    bus->number <= port->bridge.conf.subordinate_bus)
979 			return port;
980 	}
981 
982 	return NULL;
983 }
984 
985 /* PCI configuration space write function */
986 static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
987 			      int where, int size, u32 val)
988 {
989 	struct mvebu_pcie *pcie = bus->sysdata;
990 	struct mvebu_pcie_port *port;
991 
992 	port = mvebu_pcie_find_port(pcie, bus, devfn);
993 	if (!port)
994 		return PCIBIOS_DEVICE_NOT_FOUND;
995 
996 	return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
997 }
998 
999 /* PCI configuration space read function */
1000 static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
1001 			      int size, u32 *val)
1002 {
1003 	struct mvebu_pcie *pcie = bus->sysdata;
1004 	struct mvebu_pcie_port *port;
1005 
1006 	port = mvebu_pcie_find_port(pcie, bus, devfn);
1007 	if (!port)
1008 		return PCIBIOS_DEVICE_NOT_FOUND;
1009 
1010 	return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
1011 }
1012 
1013 static struct pci_ops mvebu_pcie_ops = {
1014 	.read = mvebu_pcie_rd_conf,
1015 	.write = mvebu_pcie_wr_conf,
1016 };
1017 
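/*
 * Mask/unmask an individual legacy INTx by clearing/setting its bit in the
 * per-port PCIE_INT_UNMASK_OFF register, under irq_lock.
 */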
1018 static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
1019 {
1020 	struct mvebu_pcie_port *port = d->domain->host_data;
1021 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1022 	unsigned long flags;
1023 	u32 unmask;
1024 
1025 	raw_spin_lock_irqsave(&port->irq_lock, flags);
1026 	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
1027 	unmask &= ~PCIE_INT_INTX(hwirq);
1028 	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
1029 	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
1030 }
1031 
1032 static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
1033 {
1034 	struct mvebu_pcie_port *port = d->domain->host_data;
1035 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1036 	unsigned long flags;
1037 	u32 unmask;
1038 
1039 	raw_spin_lock_irqsave(&port->irq_lock, flags);
1040 	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
1041 	unmask |= PCIE_INT_INTX(hwirq);
1042 	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
1043 	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
1044 }
1045 
1046 static struct irq_chip intx_irq_chip = {
1047 	.name = "mvebu-INTx",
1048 	.irq_mask = mvebu_pcie_intx_irq_mask,
1049 	.irq_unmask = mvebu_pcie_intx_irq_unmask,
1050 };
1051 
1052 static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
1053 				   unsigned int virq, irq_hw_number_t hwirq)
1054 {
1055 	struct mvebu_pcie_port *port = h->host_data;
1056 
1057 	irq_set_status_flags(virq, IRQ_LEVEL);
1058 	irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
1059 	irq_set_chip_data(virq, port);
1060 
1061 	return 0;
1062 }
1063 
1064 static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
1065 	.map = mvebu_pcie_intx_irq_map,
1066 	.xlate = irq_domain_xlate_onecell,
1067 };
1068 
1069 static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
1070 {
1071 	struct device *dev = &port->pcie->pdev->dev;
1072 	struct device_node *pcie_intc_node;
1073 
1074 	raw_spin_lock_init(&port->irq_lock);
1075 
1076 	pcie_intc_node = of_get_next_child(port->dn, NULL);
1077 	if (!pcie_intc_node) {
1078 		dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
1079 		return -ENODEV;
1080 	}
1081 
1082 	port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
1083 						      &mvebu_pcie_intx_irq_domain_ops,
1084 						      port);
1085 	of_node_put(pcie_intc_node);
1086 	if (!port->intx_irq_domain) {
1087 		dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
1088 		return -ENOMEM;
1089 	}
1090 
1091 	return 0;
1092 }
1093 
1094 static void mvebu_pcie_irq_handler(struct irq_desc *desc)
1095 {
1096 	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
1097 	struct irq_chip *chip = irq_desc_get_chip(desc);
1098 	struct device *dev = &port->pcie->pdev->dev;
1099 	u32 cause, unmask, status;
1100 	int i;
1101 
1102 	chained_irq_enter(chip, desc);
1103 
1104 	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
1105 	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
1106 	status = cause & unmask;
1107 
1108 	/* Process legacy INTx interrupts */
1109 	for (i = 0; i < PCI_NUM_INTX; i++) {
1110 		if (!(status & PCIE_INT_INTX(i)))
1111 			continue;
1112 
1113 		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
1114 			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
1115 	}
1116 
1117 	chained_irq_exit(chip, desc);
1118 }
1119 
1120 static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1121 {
1122 	/* Interrupt support on mvebu emulated bridges is not implemented yet */
1123 	if (dev->bus->number == 0)
1124 		return 0; /* Proper return code 0 == NO_IRQ */
1125 
1126 	return of_irq_parse_and_map_pci(dev, slot, pin);
1127 }
1128 
1129 static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
1130 						 const struct resource *res,
1131 						 resource_size_t start,
1132 						 resource_size_t size,
1133 						 resource_size_t align)
1134 {
1135 	if (dev->bus->number != 0)
1136 		return start;
1137 
1138 	/*
1139 	 * On the PCI-to-PCI bridge side, the I/O windows must have at
1140 	 * least a 64 KB size and the memory windows must have at
1141 	 * least a 1 MB size. Moreover, MBus windows need to have a
1142 	 * base address aligned on their size, and their size must be
1143 	 * a power of two. This means that if the BAR doesn't have a
1144 	 * power of two size, several MBus windows will actually be
1145 	 * created. We need to ensure that the biggest MBus window
1146 	 * (which will be the first one) is aligned on its size, which
1147 	 * explains the rounddown_pow_of_two() being done here.
1148 	 */
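	/*
	 * E.g. (illustrative): a 1.5 MB memory BAR has
	 * rounddown_pow_of_two(size) = 1 MB, so its start is rounded up to a
	 * 1 MB boundary and the first (largest) MBus window created for it is
	 * naturally aligned.
	 */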
1149 	if (res->flags & IORESOURCE_IO)
1150 		return round_up(start, max_t(resource_size_t, SZ_64K,
1151 					     rounddown_pow_of_two(size)));
1152 	else if (res->flags & IORESOURCE_MEM)
1153 		return round_up(start, max_t(resource_size_t, SZ_1M,
1154 					     rounddown_pow_of_two(size)));
1155 	else
1156 		return start;
1157 }
1158 
1159 static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
1160 					      struct device_node *np,
1161 					      struct mvebu_pcie_port *port)
1162 {
1163 	int ret = 0;
1164 
1165 	ret = of_address_to_resource(np, 0, &port->regs);
1166 	if (ret)
1167 		return (void __iomem *)ERR_PTR(ret);
1168 
1169 	return devm_ioremap_resource(&pdev->dev, &port->regs);
1170 }
1171 
1172 #define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
1173 #define    DT_TYPE_IO                 0x1
1174 #define    DT_TYPE_MEM32              0x2
1175 #define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
1176 #define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
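/*
 * Illustrative decode of a hypothetical DT value: a 64-bit parent CPU address
 * whose upper cell is 0x04e00000 yields DT_CPUADDR_TO_TARGET() = 0x04 and
 * DT_CPUADDR_TO_ATTR() = 0xe0, i.e. the MBus target/attribute pair that
 * mvebu_get_tgt_attr() below hands back for the window setup code.
 */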
1177 
1178 static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
1179 			      unsigned long type,
1180 			      unsigned int *tgt,
1181 			      unsigned int *attr)
1182 {
1183 	const int na = 3, ns = 2;
1184 	const __be32 *range;
1185 	int rlen, nranges, rangesz, pna, i;
1186 
1187 	*tgt = -1;
1188 	*attr = -1;
1189 
1190 	range = of_get_property(np, "ranges", &rlen);
1191 	if (!range)
1192 		return -EINVAL;
1193 
1194 	pna = of_n_addr_cells(np);
1195 	rangesz = pna + na + ns;
1196 	nranges = rlen / sizeof(__be32) / rangesz;
1197 
1198 	for (i = 0; i < nranges; i++, range += rangesz) {
1199 		u32 flags = of_read_number(range, 1);
1200 		u32 slot = of_read_number(range + 1, 1);
1201 		u64 cpuaddr = of_read_number(range + na, pna);
1202 		unsigned long rtype;
1203 
1204 		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
1205 			rtype = IORESOURCE_IO;
1206 		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
1207 			rtype = IORESOURCE_MEM;
1208 		else
1209 			continue;
1210 
1211 		if (slot == PCI_SLOT(devfn) && type == rtype) {
1212 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
1213 			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
1214 			return 0;
1215 		}
1216 	}
1217 
1218 	return -ENOENT;
1219 }
1220 
1221 static int mvebu_pcie_suspend(struct device *dev)
1222 {
1223 	struct mvebu_pcie *pcie;
1224 	int i;
1225 
1226 	pcie = dev_get_drvdata(dev);
1227 	for (i = 0; i < pcie->nports; i++) {
1228 		struct mvebu_pcie_port *port = pcie->ports + i;
1229 		if (!port->base)
1230 			continue;
1231 		port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
1232 	}
1233 
1234 	return 0;
1235 }
1236 
1237 static int mvebu_pcie_resume(struct device *dev)
1238 {
1239 	struct mvebu_pcie *pcie;
1240 	int i;
1241 
1242 	pcie = dev_get_drvdata(dev);
1243 	for (i = 0; i < pcie->nports; i++) {
1244 		struct mvebu_pcie_port *port = pcie->ports + i;
1245 		if (!port->base)
1246 			continue;
1247 		mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
1248 		mvebu_pcie_setup_hw(port);
1249 	}
1250 
1251 	return 0;
1252 }
1253 
1254 static void mvebu_pcie_port_clk_put(void *data)
1255 {
1256 	struct mvebu_pcie_port *port = data;
1257 
1258 	clk_put(port->clk);
1259 }
1260 
1261 static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
1262 	struct mvebu_pcie_port *port, struct device_node *child)
1263 {
1264 	struct device *dev = &pcie->pdev->dev;
1265 	u32 slot_power_limit;
1266 	int ret;
1267 	u32 num_lanes;
1268 
1269 	port->pcie = pcie;
1270 
1271 	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
1272 		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
1273 			 child);
1274 		goto skip;
1275 	}
1276 
1277 	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
1278 		port->lane = 0;
1279 
1280 	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
1281 		port->is_x4 = true;
1282 
1283 	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
1284 				    port->lane);
1285 	if (!port->name) {
1286 		ret = -ENOMEM;
1287 		goto err;
1288 	}
1289 
1290 	port->devfn = of_pci_get_devfn(child);
1291 	if (port->devfn < 0)
1292 		goto skip;
1293 	if (PCI_FUNC(port->devfn) != 0) {
1294 		dev_err(dev, "%s: invalid function number, must be zero\n",
1295 			port->name);
1296 		goto skip;
1297 	}
1298 
1299 	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
1300 				 &port->mem_target, &port->mem_attr);
1301 	if (ret < 0) {
1302 		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
1303 			port->name);
1304 		goto skip;
1305 	}
1306 
1307 	if (resource_size(&pcie->io) != 0) {
1308 		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
1309 				   &port->io_target, &port->io_attr);
1310 	} else {
1311 		port->io_target = -1;
1312 		port->io_attr = -1;
1313 	}
1314 
1315 	/*
1316 	 * Old DT bindings do not contain the "intx" interrupt,
1317 	 * so do not fail probing the driver when the interrupt does not exist.
1318 	 */
1319 	port->intx_irq = of_irq_get_byname(child, "intx");
1320 	if (port->intx_irq == -EPROBE_DEFER) {
1321 		ret = port->intx_irq;
1322 		goto err;
1323 	}
1324 	if (port->intx_irq <= 0) {
1325 		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
1326 			      "%pOF does not contain intx interrupt\n",
1327 			 port->name, child);
1328 	}
1329 
1330 	port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
1331 					  port->name);
1332 	if (!port->reset_name) {
1333 		ret = -ENOMEM;
1334 		goto err;
1335 	}
1336 
1337 	port->reset_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(child),
1338 						 "reset", GPIOD_OUT_HIGH,
1339 						 port->name);
1340 	ret = PTR_ERR_OR_ZERO(port->reset_gpio);
1341 	if (ret) {
1342 		if (ret != -ENOENT)
1343 			goto err;
1344 		/* reset gpio is optional */
1345 		port->reset_gpio = NULL;
1346 		devm_kfree(dev, port->reset_name);
1347 		port->reset_name = NULL;
1348 	}
1349 
1350 	slot_power_limit = of_pci_get_slot_power_limit(child,
1351 				&port->slot_power_limit_value,
1352 				&port->slot_power_limit_scale);
1353 	if (slot_power_limit)
1354 		dev_info(dev, "%s: Slot power limit %u.%uW\n",
1355 			 port->name,
1356 			 slot_power_limit / 1000,
1357 			 (slot_power_limit / 100) % 10);
1358 
1359 	port->clk = of_clk_get_by_name(child, NULL);
1360 	if (IS_ERR(port->clk)) {
1361 		dev_err(dev, "%s: cannot get clock\n", port->name);
1362 		goto skip;
1363 	}
1364 
1365 	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
1366 	if (ret < 0) {
1367 		clk_put(port->clk);
1368 		goto err;
1369 	}
1370 
1371 	return 1;
1372 
1373 skip:
1374 	ret = 0;
1375 
1376 	/* In the case of skipping, we need to free these */
1377 	devm_kfree(dev, port->reset_name);
1378 	port->reset_name = NULL;
1379 	devm_kfree(dev, port->name);
1380 	port->name = NULL;
1381 
1382 err:
1383 	return ret;
1384 }
1385 
1386 /*
1387  * Power up a PCIe port.  PCIe requires the refclk to be stable for 100µs
1388  * prior to releasing PERST.  See table 2-4 in section 2.6.2 AC Specifications
1389  * of the PCI Express Card Electromechanical Specification, 1.1.
1390  */
1391 static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
1392 {
1393 	int ret;
1394 
1395 	ret = clk_prepare_enable(port->clk);
1396 	if (ret < 0)
1397 		return ret;
1398 
1399 	if (port->reset_gpio) {
1400 		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
1401 
1402 		of_property_read_u32(port->dn, "reset-delay-us",
1403 				     &reset_udelay);
1404 
1405 		udelay(100);
1406 
1407 		gpiod_set_value_cansleep(port->reset_gpio, 0);
1408 		msleep(reset_udelay / 1000);
1409 	}
1410 
1411 	return 0;
1412 }
1413 
1414 /*
1415  * Power down a PCIe port.  Strictly, PCIe requires us to place the card
1416  * in D3hot state before asserting PERST#.
1417  */
1418 static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
1419 {
1420 	gpiod_set_value_cansleep(port->reset_gpio, 1);
1421 
1422 	clk_disable_unprepare(port->clk);
1423 }
1424 
1425 /*
1426  * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
1427  * so we need extra resource setup, parsing our special DT properties that encode
1428  * the MEM and IO apertures.
1429  */
1430 static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
1431 {
1432 	struct device *dev = &pcie->pdev->dev;
1433 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
1434 	int ret;
1435 
1436 	/* Get the PCIe memory aperture */
1437 	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
1438 	if (resource_size(&pcie->mem) == 0) {
1439 		dev_err(dev, "invalid memory aperture size\n");
1440 		return -EINVAL;
1441 	}
1442 
1443 	pcie->mem.name = "PCI MEM";
1444 	pci_add_resource(&bridge->windows, &pcie->mem);
1445 	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
1446 	if (ret)
1447 		return ret;
1448 
1449 	/* Get the PCIe IO aperture */
1450 	mvebu_mbus_get_pcie_io_aperture(&pcie->io);
1451 
1452 	if (resource_size(&pcie->io) != 0) {
1453 		pcie->realio.flags = pcie->io.flags;
1454 		pcie->realio.start = PCIBIOS_MIN_IO;
1455 		pcie->realio.end = min_t(resource_size_t,
1456 					 IO_SPACE_LIMIT - SZ_64K,
1457 					 resource_size(&pcie->io) - 1);
1458 		pcie->realio.name = "PCI I/O";
1459 
1460 		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
1461 		if (ret)
1462 			return ret;
1463 
1464 		pci_add_resource(&bridge->windows, &pcie->realio);
1465 		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
1466 		if (ret)
1467 			return ret;
1468 	}
1469 
1470 	return 0;
1471 }
1472 
1473 static int mvebu_pcie_probe(struct platform_device *pdev)
1474 {
1475 	struct device *dev = &pdev->dev;
1476 	struct mvebu_pcie *pcie;
1477 	struct pci_host_bridge *bridge;
1478 	struct device_node *np = dev->of_node;
1479 	struct device_node *child;
1480 	int num, i, ret;
1481 
1482 	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
1483 	if (!bridge)
1484 		return -ENOMEM;
1485 
1486 	pcie = pci_host_bridge_priv(bridge);
1487 	pcie->pdev = pdev;
1488 	platform_set_drvdata(pdev, pcie);
1489 
1490 	ret = mvebu_pcie_parse_request_resources(pcie);
1491 	if (ret)
1492 		return ret;
1493 
1494 	num = of_get_available_child_count(np);
1495 
1496 	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
1497 	if (!pcie->ports)
1498 		return -ENOMEM;
1499 
1500 	i = 0;
1501 	for_each_available_child_of_node(np, child) {
1502 		struct mvebu_pcie_port *port = &pcie->ports[i];
1503 
1504 		ret = mvebu_pcie_parse_port(pcie, port, child);
1505 		if (ret < 0) {
1506 			of_node_put(child);
1507 			return ret;
1508 		} else if (ret == 0) {
1509 			continue;
1510 		}
1511 
1512 		port->dn = child;
1513 		i++;
1514 	}
1515 	pcie->nports = i;
1516 
1517 	for (i = 0; i < pcie->nports; i++) {
1518 		struct mvebu_pcie_port *port = &pcie->ports[i];
1519 		int irq = port->intx_irq;
1520 
1521 		child = port->dn;
1522 		if (!child)
1523 			continue;
1524 
1525 		ret = mvebu_pcie_powerup(port);
1526 		if (ret < 0)
1527 			continue;
1528 
1529 		port->base = mvebu_pcie_map_registers(pdev, child, port);
1530 		if (IS_ERR(port->base)) {
1531 			dev_err(dev, "%s: cannot map registers\n", port->name);
1532 			port->base = NULL;
1533 			mvebu_pcie_powerdown(port);
1534 			continue;
1535 		}
1536 
1537 		ret = mvebu_pci_bridge_emul_init(port);
1538 		if (ret < 0) {
1539 			dev_err(dev, "%s: cannot init emulated bridge\n",
1540 				port->name);
1541 			devm_iounmap(dev, port->base);
1542 			port->base = NULL;
1543 			mvebu_pcie_powerdown(port);
1544 			continue;
1545 		}
1546 
1547 		if (irq > 0) {
1548 			ret = mvebu_pcie_init_irq_domain(port);
1549 			if (ret) {
1550 				dev_err(dev, "%s: cannot init irq domain\n",
1551 					port->name);
1552 				pci_bridge_emul_cleanup(&port->bridge);
1553 				devm_iounmap(dev, port->base);
1554 				port->base = NULL;
1555 				mvebu_pcie_powerdown(port);
1556 				continue;
1557 			}
1558 			irq_set_chained_handler_and_data(irq,
1559 							 mvebu_pcie_irq_handler,
1560 							 port);
1561 		}
1562 
1563 		/*
1564 		 * The PCIe topology exported by mvebu hw is quite complicated. In
1565 		 * reality there are something like N fully independent host bridges,
1566 		 * where each host bridge has one PCIe Root Port (which acts as
1567 		 * PCI Bridge device). Each host bridge has its own independent
1568 		 * internal registers, independent access to PCI config space,
1569 		 * independent interrupt lines, independent window and memory
1570 		 * access configuration. But additionally there is some kind of
1571 		 * peer-to-peer support between PCIe devices behind different
1572 		 * host bridges limited just to forwarding of memory and I/O
1573 		 * transactions (forwarding of error messages and config cycles
1574 		 * is not supported). So we could say there are N independent
1575 		 * PCIe Root Complexes.
1576 		 *
1577 		 * For this kind of setup the DT should have been structured into
1578 		 * N independent PCIe controllers / host bridges. But instead the
1579 		 * structure was defined in the past to put the PCIe Root Ports of
1580 		 * all host bridges onto one bus zero, like in a classic multi-port
1581 		 * Root Complex setup with just one host bridge.
1582 		 *
1583 		 * This means that the pci-mvebu.c driver provides a "virtual" bus 0
1584 		 * on which it registers all PCIe Root Ports (PCI Bridge devices)
1585 		 * specified in DT by their BDF addresses, and virtually routes the
1586 		 * PCI config accesses of each PCI Bridge device to the corresponding
1587 		 * PCIe host bridge.
1588 		 *
1589 		 * Normally a PCI Bridge should choose between Type 0 and Type 1
1590 		 * config requests based on the primary and secondary bus numbers
1591 		 * configured on the bridge itself. But because the mvebu PCI Bridge
1592 		 * does not have registers for the primary and secondary bus numbers
1593 		 * in its config space, it determines the type of config requests
1594 		 * in its own custom way.
1595 		 *
1596 		 * There are two ways in which mvebu determines the type of a config
1597 		 * request.
1598 		 *
1599 		 * 1. If Secondary Bus Number Enable bit is not set or is not
1600 		 * available (applies for pre-XP PCIe controllers) then Type 0
1601 		 * is used if target bus number equals Local Bus Number (bits
1602 		 * [15:8] in register 0x1a04) and target device number differs
1603 		 * from Local Device Number (bits [20:16] in register 0x1a04).
1604 		 * Type 1 is used if target bus number differs from Local Bus
1605 		 * Number. And when target bus number equals Local Bus Number
1606 		 * and target device equals Local Device Number then request is
1607 		 * routed to Local PCI Bridge (PCIe Root Port).
1608 		 *
1609 		 * 2. If Secondary Bus Number Enable bit is set (bit 7 in
1610 		 * register 0x1a2c) then mvebu hw determines the type of config
1611 		 * request like a compliant PCI Bridge, based on the primary bus number
1612 		 * which is configured via Local Bus Number (bits [15:8] in
1613 		 * register 0x1a04) and secondary bus number which is configured
1614 		 * via Secondary Bus Number (bits [7:0] in register 0x1a2c).
1615 		 * Local PCI Bridge (PCIe Root Port) is available on primary bus
1616 		 * as device with Local Device Number (bits [20:16] in register
1617 		 * 0x1a04).
1618 		 *
1619 		 * The Secondary Bus Number Enable bit is disabled by default and
1620 		 * option 2. is not available on pre-XP PCIe controllers. Hence
1621 		 * this driver always uses option 1.
1622 		 *
1623 		 * Basically it means that the primary and secondary buses share
1624 		 * one virtual number configured via the Local Bus Number bits, and
1625 		 * the Local Device Number bits determine whether the primary or
1626 		 * secondary bus is being accessed. Set Local Device Number to 1 and
1627 		 * redirect all writes of the PCI Bridge Secondary Bus Number register
1628 		 * to Local Bus Number (bits [15:8] in register 0x1a04).
1629 		 *
1630 		 * So accessing devices on buses behind the secondary bus
1631 		 * number works correctly, and config space accesses to
1632 		 * device 0 at the secondary bus number are correctly routed
1633 		 * to the secondary bus. Due to the issues described in
1634 		 * mvebu_pcie_setup_hw(), PCI Bridges on the primary bus (zero)
1635 		 * are not accessed directly via PCI config space but rather
1636 		 * indirectly via the kernel's emulated PCI bridge driver.
1637 		 */
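		/*
		 * Concrete illustration of option 1: with Local Bus Number B and
		 * Local Device Number 1, a config access to bus B / device 0 is
		 * emitted as a Type 0 request to the link partner, an access to
		 * any other bus number is emitted as a Type 1 request, and an
		 * access to bus B / device 1 is routed back to the Local PCI
		 * Bridge itself.
		 */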
1638 		mvebu_pcie_setup_hw(port);
1639 		mvebu_pcie_set_local_dev_nr(port, 1);
1640 		mvebu_pcie_set_local_bus_nr(port, 0);
1641 	}
1642 
1643 	bridge->sysdata = pcie;
1644 	bridge->ops = &mvebu_pcie_ops;
1645 	bridge->child_ops = &mvebu_pcie_child_ops;
1646 	bridge->align_resource = mvebu_pcie_align_resource;
1647 	bridge->map_irq = mvebu_pcie_map_irq;
1648 
1649 	return pci_host_probe(bridge);
1650 }
1651 
1652 static void mvebu_pcie_remove(struct platform_device *pdev)
1653 {
1654 	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
1655 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
1656 	u32 cmd, sspl;
1657 	int i;
1658 
1659 	/* Remove PCI bus with all devices. */
1660 	pci_lock_rescan_remove();
1661 	pci_stop_root_bus(bridge->bus);
1662 	pci_remove_root_bus(bridge->bus);
1663 	pci_unlock_rescan_remove();
1664 
1665 	for (i = 0; i < pcie->nports; i++) {
1666 		struct mvebu_pcie_port *port = &pcie->ports[i];
1667 		int irq = port->intx_irq;
1668 
1669 		if (!port->base)
1670 			continue;
1671 
1672 		/* Disable Root Bridge I/O space, memory space and bus mastering. */
1673 		cmd = mvebu_readl(port, PCIE_CMD_OFF);
1674 		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1675 		mvebu_writel(port, cmd, PCIE_CMD_OFF);
1676 
1677 		/* Mask all interrupt sources. */
1678 		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
1679 
1680 		/* Clear all interrupt causes. */
1681 		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);
1682 
1683 		if (irq > 0)
1684 			irq_set_chained_handler_and_data(irq, NULL, NULL);
1685 
1686 		/* Remove IRQ domains. */
1687 		if (port->intx_irq_domain)
1688 			irq_domain_remove(port->intx_irq_domain);
1689 
1690 		/* Free config space for emulated root bridge. */
1691 		pci_bridge_emul_cleanup(&port->bridge);
1692 
1693 		/* Disable sending Set_Slot_Power_Limit PCIe Message. */
1694 		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
1695 		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
1696 		mvebu_writel(port, sspl, PCIE_SSPL_OFF);
1697 
1698 		/* Disable and clear BARs and windows. */
1699 		mvebu_pcie_disable_wins(port);
1700 
1701 		/* Delete PCIe IO and MEM windows. */
1702 		if (port->iowin.size)
1703 			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
1704 		if (port->memwin.size)
1705 			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);
1706 
1707 		/* Power down card and disable clocks. Must be the last step. */
1708 		mvebu_pcie_powerdown(port);
1709 	}
1710 }
1711 
1712 static const struct of_device_id mvebu_pcie_of_match_table[] = {
1713 	{ .compatible = "marvell,armada-xp-pcie", },
1714 	{ .compatible = "marvell,armada-370-pcie", },
1715 	{ .compatible = "marvell,dove-pcie", },
1716 	{ .compatible = "marvell,kirkwood-pcie", },
1717 	{},
1718 };
1719 
1720 static const struct dev_pm_ops mvebu_pcie_pm_ops = {
1721 	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
1722 };
1723 
1724 static struct platform_driver mvebu_pcie_driver = {
1725 	.driver = {
1726 		.name = "mvebu-pcie",
1727 		.of_match_table = mvebu_pcie_of_match_table,
1728 		.pm = &mvebu_pcie_pm_ops,
1729 	},
1730 	.probe = mvebu_pcie_probe,
1731 	.remove_new = mvebu_pcie_remove,
1732 };
1733 module_platform_driver(mvebu_pcie_driver);
1734 
1735 MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
1736 MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
1737 MODULE_DESCRIPTION("Marvell EBU PCIe controller");
1738 MODULE_LICENSE("GPL v2");
1739