1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCIe host bridge driver for Apple system-on-chips.
4  *
5  * The HW is ECAM compliant, so once the controller is initialized,
6  * the driver mostly deals MSI mapping and handling of per-port
7  * interrupts (INTx, management and error signals).
8  *
9  * Initialization requires enabling power and clocks, along with a
10  * number of register pokes.
11  *
12  * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
13  * Copyright (C) 2021 Google LLC
14  * Copyright (C) 2021 Corellium LLC
15  * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
16  *
17  * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
18  * Author: Marc Zyngier <maz@kernel.org>
19  */
20 
21 #include <linux/gpio/consumer.h>
22 #include <linux/kernel.h>
23 #include <linux/iopoll.h>
24 #include <linux/irqchip/chained_irq.h>
25 #include <linux/irqdomain.h>
26 #include <linux/list.h>
27 #include <linux/module.h>
28 #include <linux/msi.h>
29 #include <linux/notifier.h>
30 #include <linux/of_irq.h>
31 #include <linux/pci-ecam.h>
32 
/* "Core" (root complex) register block, shared by all ports */
#define CORE_RC_PHYIF_CTL		0x00024
#define   CORE_RC_PHYIF_CTL_RUN		BIT(0)
#define CORE_RC_PHYIF_STAT		0x00028
#define   CORE_RC_PHYIF_STAT_REFCLK	BIT(4)
#define CORE_RC_CTL			0x00050
#define   CORE_RC_CTL_RUN		BIT(0)
#define CORE_RC_STAT			0x00058
#define   CORE_RC_STAT_READY		BIT(0)
#define CORE_FABRIC_STAT		0x04000
#define   CORE_FABRIC_STAT_MASK		0x001F001F
/* Per-lane configuration/control, one 0x4000-sized window per port */
#define CORE_LANE_CFG(port)		(0x84000 + 0x4000 * (port))
#define   CORE_LANE_CFG_REFCLK0REQ	BIT(0)
#define   CORE_LANE_CFG_REFCLK1		BIT(1)
#define   CORE_LANE_CFG_REFCLK0ACK	BIT(2)
#define   CORE_LANE_CFG_REFCLKEN	(BIT(9) | BIT(10))
#define CORE_LANE_CTL(port)		(0x84004 + 0x4000 * (port))
#define   CORE_LANE_CTL_CFGACC		BIT(15)
50 
/* Per-port register block */
#define PORT_LTSSMCTL			0x00080
#define   PORT_LTSSMCTL_START		BIT(0)
/* Bit numbers within PORT_INTSTAT/PORT_INTMSK* (hwirqs of the port domain) */
#define PORT_INTSTAT			0x00100
#define   PORT_INT_TUNNEL_ERR		31
#define   PORT_INT_CPL_TIMEOUT		23
#define   PORT_INT_RID2SID_MAPERR	22
#define   PORT_INT_CPL_ABORT		21
#define   PORT_INT_MSI_BAD_DATA		19
#define   PORT_INT_MSI_ERR		18
#define   PORT_INT_REQADDR_GT32		17
#define   PORT_INT_AF_TIMEOUT		15
#define   PORT_INT_LINK_DOWN		14
#define   PORT_INT_LINK_UP		12
#define   PORT_INT_LINK_BWMGMT		11
#define   PORT_INT_AER_MASK		(15 << 4)
#define   PORT_INT_PORT_ERR		4
#define   PORT_INT_INTx(i)		i
#define   PORT_INT_INTx_MASK		15
#define PORT_INTMSK			0x00104
#define PORT_INTMSKSET			0x00108
#define PORT_INTMSKCLR			0x0010c
#define PORT_MSICFG			0x00124
#define   PORT_MSICFG_EN		BIT(0)
#define   PORT_MSICFG_L2MSINUM_SHIFT	4
#define PORT_MSIBASE			0x00128
#define   PORT_MSIBASE_1_SHIFT		16
#define PORT_MSIADDR			0x00168
#define PORT_LINKSTS			0x00208
#define   PORT_LINKSTS_UP		BIT(0)
#define   PORT_LINKSTS_BUSY		BIT(2)
#define PORT_LINKCMDSTS			0x00210
#define PORT_OUTS_NPREQS		0x00284
#define   PORT_OUTS_NPREQS_REQ		BIT(24)
#define   PORT_OUTS_NPREQS_CPL		BIT(16)
#define PORT_RXWR_FIFO			0x00288
#define   PORT_RXWR_FIFO_HDR		GENMASK(15, 10)
#define   PORT_RXWR_FIFO_DATA		GENMASK(9, 0)
#define PORT_RXRD_FIFO			0x0028C
#define   PORT_RXRD_FIFO_REQ		GENMASK(6, 0)
#define PORT_OUTS_CPLS			0x00290
#define   PORT_OUTS_CPLS_SHRD		GENMASK(14, 8)
#define   PORT_OUTS_CPLS_WAIT		GENMASK(6, 0)
#define PORT_APPCLK			0x00800
#define   PORT_APPCLK_EN		BIT(0)
#define   PORT_APPCLK_CGDIS		BIT(8)
#define PORT_STATUS			0x00804
#define   PORT_STATUS_READY		BIT(0)
#define PORT_REFCLK			0x00810
#define   PORT_REFCLK_EN		BIT(0)
#define   PORT_REFCLK_CGDIS		BIT(8)
#define PORT_PERST			0x00814
#define   PORT_PERST_OFF		BIT(0)
/* Requester-ID to Stream-ID translation table, one 32bit entry per slot */
#define PORT_RID2SID(i16)		(0x00828 + 4 * (i16))
#define   PORT_RID2SID_VALID		BIT(31)
#define   PORT_RID2SID_SID_SHIFT	16
#define   PORT_RID2SID_BUS_SHIFT	8
#define   PORT_RID2SID_DEV_SHIFT	3
#define   PORT_RID2SID_FUNC_SHIFT	0
#define PORT_OUTS_PREQS_HDR		0x00980
#define   PORT_OUTS_PREQS_HDR_MASK	GENMASK(9, 0)
#define PORT_OUTS_PREQS_DATA		0x00984
#define   PORT_OUTS_PREQS_DATA_MASK	GENMASK(15, 0)
#define PORT_TUNCTRL			0x00988
#define   PORT_TUNCTRL_PERST_ON		BIT(0)
#define   PORT_TUNCTRL_PERST_ACK_REQ	BIT(1)
#define PORT_TUNSTAT			0x0098c
#define   PORT_TUNSTAT_PERST_ON		BIT(0)
#define   PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
#define PORT_PREFMEM_ENABLE		0x00994

/* Upper bound on RID2SID table entries probed in apple_pcie_setup_port() */
#define MAX_RID2SID			64
122 
123 /*
124  * The doorbell address is set to 0xfffff000, which by convention
125  * matches what MacOS does, and it is possible to use any other
126  * address (in the bottom 4GB, as the base register is only 32bit).
127  * However, it has to be excluded from the IOVA range, and the DART
128  * driver has to know about it.
129  */
130 #define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
131 
/* Per-controller state, shared by all root ports */
struct apple_pcie {
	struct mutex		lock;		/* protects bitmap and the per-port sid_map */
	struct device		*dev;
	void __iomem            *base;		/* "core" register block */
	struct irq_domain	*domain;	/* PCI MSI irq domain */
	unsigned long		*bitmap;	/* MSI vector allocator (nvecs bits) */
	struct list_head	ports;		/* list of apple_pcie_port::entry */
	struct completion	event;		/* completed on link-up */
	struct irq_fwspec	fwspec;		/* parent fwspec from "msi-ranges" */
	u32			nvecs;		/* number of MSI vectors */
};
143 
/* Per-root-port state */
struct apple_pcie_port {
	struct apple_pcie	*pcie;		/* owning controller */
	struct device_node	*np;		/* port device-tree node */
	void __iomem		*base;		/* per-port register block */
	struct irq_domain	*domain;	/* per-port interrupt domain (32 hwirqs) */
	struct list_head	entry;		/* link in apple_pcie::ports */
	DECLARE_BITMAP(sid_map, MAX_RID2SID);	/* allocated RID2SID slots */
	int			sid_map_sz;	/* number of usable RID2SID entries */
	int			idx;		/* port index derived from "reg" */
};
154 
155 static void rmw_set(u32 set, void __iomem *addr)
156 {
157 	writel_relaxed(readl_relaxed(addr) | set, addr);
158 }
159 
160 static void rmw_clear(u32 clr, void __iomem *addr)
161 {
162 	writel_relaxed(readl_relaxed(addr) & ~clr, addr);
163 }
164 
/* Mask an MSI both at the PCI level and in the parent interrupt chip */
static void apple_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
170 
/* Unmask an MSI both at the PCI level and in the parent interrupt chip */
static void apple_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
176 
/*
 * Top-level MSI irqchip: combines PCI MSI mask/unmask with the
 * corresponding operations in the parent interrupt controller.
 */
static struct irq_chip apple_msi_top_chip = {
	.name			= "PCIe MSI",
	.irq_mask		= apple_msi_top_irq_mask,
	.irq_unmask		= apple_msi_top_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
};
185 
186 static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
187 {
188 	msg->address_hi = upper_32_bits(DOORBELL_ADDR);
189 	msg->address_lo = lower_32_bits(DOORBELL_ADDR);
190 	msg->data = data->hwirq;
191 }
192 
/*
 * Bottom-level MSI irqchip: forwards everything to the parent chip and
 * provides the doorbell-based message composition.
 */
static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};
202 
203 static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
204 				  unsigned int nr_irqs, void *args)
205 {
206 	struct apple_pcie *pcie = domain->host_data;
207 	struct irq_fwspec fwspec = pcie->fwspec;
208 	unsigned int i;
209 	int ret, hwirq;
210 
211 	mutex_lock(&pcie->lock);
212 
213 	hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
214 					order_base_2(nr_irqs));
215 
216 	mutex_unlock(&pcie->lock);
217 
218 	if (hwirq < 0)
219 		return -ENOSPC;
220 
221 	fwspec.param[1] += hwirq;
222 
223 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
224 	if (ret)
225 		return ret;
226 
227 	for (i = 0; i < nr_irqs; i++) {
228 		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
229 					      &apple_msi_bottom_chip,
230 					      domain->host_data);
231 	}
232 
233 	return 0;
234 }
235 
/*
 * Release the block of MSI vectors backing @virq. The hwirq of the
 * first descriptor is enough, as the whole power-of-two region
 * obtained in apple_msi_domain_alloc() is released in one go.
 */
static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct apple_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);
}
248 
/* Ops for the intermediate (nexus) MSI allocation domain */
static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};
253 
/* PCI MSI domain capabilities: multi-MSI and MSI-X are supported */
static struct msi_domain_info apple_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &apple_msi_top_chip,
};
259 
260 static void apple_port_irq_mask(struct irq_data *data)
261 {
262 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
263 
264 	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
265 }
266 
267 static void apple_port_irq_unmask(struct irq_data *data)
268 {
269 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
270 
271 	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
272 }
273 
274 static bool hwirq_is_intx(unsigned int hwirq)
275 {
276 	return BIT(hwirq) & PORT_INT_INTx_MASK;
277 }
278 
279 static void apple_port_irq_ack(struct irq_data *data)
280 {
281 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
282 
283 	if (!hwirq_is_intx(data->hwirq))
284 		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
285 }
286 
287 static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
288 {
289 	/*
290 	 * It doesn't seem that there is any way to configure the
291 	 * trigger, so assume INTx have to be level (as per the spec),
292 	 * and the rest is edge (which looks likely).
293 	 */
294 	if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
295 		return -EINVAL;
296 
297 	irqd_set_trigger_type(data, type);
298 	return 0;
299 }
300 
/* irqchip for the per-port interrupts (INTx, link events, errors) */
static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};
308 
309 static int apple_port_irq_domain_alloc(struct irq_domain *domain,
310 				       unsigned int virq, unsigned int nr_irqs,
311 				       void *args)
312 {
313 	struct apple_pcie_port *port = domain->host_data;
314 	struct irq_fwspec *fwspec = args;
315 	int i;
316 
317 	for (i = 0; i < nr_irqs; i++) {
318 		irq_flow_handler_t flow = handle_edge_irq;
319 		unsigned int type = IRQ_TYPE_EDGE_RISING;
320 
321 		if (hwirq_is_intx(fwspec->param[0] + i)) {
322 			flow = handle_level_irq;
323 			type = IRQ_TYPE_LEVEL_HIGH;
324 		}
325 
326 		irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
327 				    &apple_port_irqchip, port, flow,
328 				    NULL, NULL);
329 
330 		irq_set_irq_type(virq + i, type);
331 	}
332 
333 	return 0;
334 }
335 
336 static void apple_port_irq_domain_free(struct irq_domain *domain,
337 				       unsigned int virq, unsigned int nr_irqs)
338 {
339 	int i;
340 
341 	for (i = 0; i < nr_irqs; i++) {
342 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
343 
344 		irq_set_handler(virq + i, NULL);
345 		irq_domain_reset_irq_data(d);
346 	}
347 }
348 
/* Ops for the per-port interrupt domain (single-cell DT translation) */
static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};
354 
355 static void apple_port_irq_handler(struct irq_desc *desc)
356 {
357 	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
358 	struct irq_chip *chip = irq_desc_get_chip(desc);
359 	unsigned long stat;
360 	int i;
361 
362 	chained_irq_enter(chip, desc);
363 
364 	stat = readl_relaxed(port->base + PORT_INTSTAT);
365 
366 	for_each_set_bit(i, &stat, 32)
367 		generic_handle_domain_irq(port->domain, i);
368 
369 	chained_irq_exit(chip, desc);
370 }
371 
372 static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
373 {
374 	struct fwnode_handle *fwnode = &port->np->fwnode;
375 	unsigned int irq;
376 
377 	/* FIXME: consider moving each interrupt under each port */
378 	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
379 				   port->idx);
380 	if (!irq)
381 		return -ENXIO;
382 
383 	port->domain = irq_domain_create_linear(fwnode, 32,
384 						&apple_port_irq_domain_ops,
385 						port);
386 	if (!port->domain)
387 		return -ENOMEM;
388 
389 	/* Disable all interrupts */
390 	writel_relaxed(~0, port->base + PORT_INTMSKSET);
391 	writel_relaxed(~0, port->base + PORT_INTSTAT);
392 
393 	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
394 
395 	/* Configure MSI base address */
396 	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
397 	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
398 
399 	/* Enable MSIs, shared between all ports */
400 	writel_relaxed(0, port->base + PORT_MSIBASE);
401 	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
402 		       PORT_MSICFG_EN, port->base + PORT_MSICFG);
403 
404 	return 0;
405 }
406 
407 static irqreturn_t apple_pcie_port_irq(int irq, void *data)
408 {
409 	struct apple_pcie_port *port = data;
410 	unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
411 
412 	switch (hwirq) {
413 	case PORT_INT_LINK_UP:
414 		dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
415 				     port->np);
416 		complete_all(&port->pcie->event);
417 		break;
418 	case PORT_INT_LINK_DOWN:
419 		dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
420 				     port->np);
421 		break;
422 	default:
423 		return IRQ_NONE;
424 	}
425 
426 	return IRQ_HANDLED;
427 }
428 
429 static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
430 {
431 	static struct {
432 		unsigned int	hwirq;
433 		const char	*name;
434 	} port_irqs[] = {
435 		{ PORT_INT_LINK_UP,	"Link up",	},
436 		{ PORT_INT_LINK_DOWN,	"Link down",	},
437 	};
438 	int i;
439 
440 	for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
441 		struct irq_fwspec fwspec = {
442 			.fwnode		= &port->np->fwnode,
443 			.param_count	= 1,
444 			.param		= {
445 				[0]	= port_irqs[i].hwirq,
446 			},
447 		};
448 		unsigned int irq;
449 		int ret;
450 
451 		irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
452 					    &fwspec);
453 		if (WARN_ON(!irq))
454 			continue;
455 
456 		ret = request_irq(irq, apple_pcie_port_irq, 0,
457 				  port_irqs[i].name, port);
458 		WARN_ON(ret);
459 	}
460 
461 	return 0;
462 }
463 
/*
 * Bring up the reference clock for @port: wait for the PHY refclk to
 * be stable, then perform the REFCLK0 request/ack handshake through
 * the lane configuration registers before enabling the port refclk.
 * Returns 0 on success or a negative (timeout) error. The sequence is
 * order-sensitive.
 */
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	/* Wait for the PHY reference clock to come up */
	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
					 stat & CORE_RC_PHYIF_STAT_REFCLK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Open the lane config access window and request REFCLK0 */
	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));

	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Same dance for REFCLK1 */
	rmw_set(CORE_LANE_CFG_REFCLK1, pcie->base + CORE_LANE_CFG(port->idx));
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK1,
					 100, 50000);

	if (res < 0)
		return res;

	/* Close the config access window again */
	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));

	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);

	return 0;
}
500 
501 static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
502 				    int idx, u32 val)
503 {
504 	writel_relaxed(val, port->base + PORT_RID2SID(idx));
505 	/* Read back to ensure completion of the write */
506 	return readl_relaxed(port->base + PORT_RID2SID(idx));
507 }
508 
/*
 * Bring up a single root port: enable its clocks, hand-shake the
 * refclk, deassert PERST#, set up the per-port interrupts, probe the
 * RID2SID table size and finally start link training.
 * Returns 0 on success (even if the link did not come up, which is
 * only warned about) or a negative errno on setup failure.
 */
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	u32 stat, idx;
	int ret, i;

	/*
	 * NOTE(review): the gpio descriptor obtained here is never
	 * released, neither on the error paths below nor on success --
	 * presumably it is meant to be held for the lifetime of the
	 * device; verify, or switch to a devm-managed variant.
	 */
	reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
				       GPIOD_OUT_LOW, "#PERST");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	port->np = np;

	/* Per-port MMIO windows start at platform resource index 2 */
	port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* Deassert PERST# (register side, then the gpio) */
	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
	gpiod_set_value(reset, 1);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < MAX_RID2SID; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	/* Kick off link training; link-up completes pcie->event */
	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}
584 
/*
 * Parse the "msi-ranges" property to find the parent interrupt
 * controller, the base wired interrupt and the number of MSI vectors,
 * then stack the nexus allocation domain and the PCI MSI domain on
 * top of the parent domain. Returns 0 on success or a negative errno.
 */
static int apple_msi_init(struct apple_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct of_phandle_args args = {};
	struct irq_domain *parent;
	int ret;

	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
					 "#interrupt-cells", 0, &args);
	if (ret)
		return ret;

	/* The vector count is the cell following the interrupt specifier */
	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
					 args.args_count + 1, &pcie->nvecs);
	if (ret)
		return ret;

	/*
	 * NOTE(review): the reference taken on args.np by
	 * of_parse_phandle_with_args() is never dropped -- presumably
	 * it is deliberately kept alive for pcie->fwspec; confirm.
	 */
	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
				  &pcie->fwspec);

	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
	if (!pcie->bitmap)
		return -ENOMEM;

	parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
	if (!parent) {
		dev_err(pcie->dev, "failed to find parent domain\n");
		return -ENXIO;
	}

	parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
					     &apple_msi_domain_ops, pcie);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
						 parent);
	if (!pcie->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}
633 
634 static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
635 {
636 	struct pci_config_window *cfg = pdev->sysdata;
637 	struct apple_pcie *pcie = cfg->priv;
638 	struct pci_dev *port_pdev;
639 	struct apple_pcie_port *port;
640 
641 	/* Find the root port this device is on */
642 	port_pdev = pcie_find_root_port(pdev);
643 
644 	/* If finding the port itself, nothing to do */
645 	if (WARN_ON(!port_pdev) || pdev == port_pdev)
646 		return NULL;
647 
648 	list_for_each_entry(port, &pcie->ports, entry) {
649 		if (port->idx == PCI_SLOT(port_pdev->devfn))
650 			return port;
651 	}
652 
653 	return NULL;
654 }
655 
656 static int apple_pcie_add_device(struct apple_pcie_port *port,
657 				 struct pci_dev *pdev)
658 {
659 	u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
660 	int idx, err;
661 
662 	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
663 		pci_name(pdev->bus->self), port->idx);
664 
665 	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
666 			"iommu-map-mask", NULL, &sid);
667 	if (err)
668 		return err;
669 
670 	mutex_lock(&port->pcie->lock);
671 
672 	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
673 	if (idx >= 0) {
674 		apple_pcie_rid2sid_write(port, idx,
675 					 PORT_RID2SID_VALID |
676 					 (sid << PORT_RID2SID_SID_SHIFT) | rid);
677 
678 		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
679 			rid, sid, idx);
680 	}
681 
682 	mutex_unlock(&port->pcie->lock);
683 
684 	return idx >= 0 ? 0 : -ENOSPC;
685 }
686 
687 static void apple_pcie_release_device(struct apple_pcie_port *port,
688 				      struct pci_dev *pdev)
689 {
690 	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
691 	int idx;
692 
693 	mutex_lock(&port->pcie->lock);
694 
695 	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
696 		u32 val;
697 
698 		val = readl_relaxed(port->base + PORT_RID2SID(idx));
699 		if ((val & 0xffff) == rid) {
700 			apple_pcie_rid2sid_write(port, idx, 0);
701 			bitmap_release_region(port->sid_map, idx, 0);
702 			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
703 			break;
704 		}
705 	}
706 
707 	mutex_unlock(&port->pcie->lock);
708 }
709 
710 static int apple_pcie_bus_notifier(struct notifier_block *nb,
711 				   unsigned long action,
712 				   void *data)
713 {
714 	struct device *dev = data;
715 	struct pci_dev *pdev = to_pci_dev(dev);
716 	struct apple_pcie_port *port;
717 	int err;
718 
719 	/*
720 	 * This is a bit ugly. We assume that if we get notified for
721 	 * any PCI device, we must be in charge of it, and that there
722 	 * is no other PCI controller in the whole system. It probably
723 	 * holds for now, but who knows for how long?
724 	 */
725 	port = apple_pcie_get_port(pdev);
726 	if (!port)
727 		return NOTIFY_DONE;
728 
729 	switch (action) {
730 	case BUS_NOTIFY_ADD_DEVICE:
731 		err = apple_pcie_add_device(port, pdev);
732 		if (err)
733 			return notifier_from_errno(err);
734 		break;
735 	case BUS_NOTIFY_DEL_DEVICE:
736 		apple_pcie_release_device(port, pdev);
737 		break;
738 	default:
739 		return NOTIFY_DONE;
740 	}
741 
742 	return NOTIFY_OK;
743 }
744 
/* Registered on the PCI bus type in apple_pcie_probe() */
static struct notifier_block apple_pcie_nb = {
	.notifier_call = apple_pcie_bus_notifier,
};
748 
/*
 * ECAM init callback: map the "core" registers, bring up every root
 * port described in the device tree and finally set up the MSI
 * machinery. Returns 0 on success or a negative errno.
 */
static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *platform = to_platform_device(dev);
	struct device_node *of_port;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	mutex_init(&pcie->lock);

	/* Resource 0 is the ECAM window; resource 1 the core registers */
	pcie->base = devm_platform_ioremap_resource(platform, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	cfg->priv = pcie;
	INIT_LIST_HEAD(&pcie->ports);

	for_each_child_of_node(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
			/* Drop the reference held by the iterator on error */
			of_node_put(of_port);
			return ret;
		}
	}

	return apple_msi_init(pcie);
}
783 
784 static int apple_pcie_probe(struct platform_device *pdev)
785 {
786 	int ret;
787 
788 	ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
789 	if (ret)
790 		return ret;
791 
792 	ret = pci_host_common_probe(pdev);
793 	if (ret)
794 		bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
795 
796 	return ret;
797 }
798 
/* Standard ECAM accessors; all the magic happens in apple_pcie_init() */
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};
807 
/* Device-tree match table */
static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
813 
/* Platform driver glue; unbinding is not supported */
static struct platform_driver apple_pcie_driver = {
	.probe	= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_LICENSE("GPL v2");
825