xref: /openbmc/linux/drivers/pci/of.c (revision ba61bb17)
// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include "pci.h"

void pci_set_of_node(struct pci_dev *dev)
{
	if (!dev->bus->dev.of_node)
		return;
	dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
						    dev->devfn);
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	dev->dev.of_node = NULL;
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	if (bus->self == NULL)
		bus->dev.of_node = pcibios_get_phb_of_node(bus);
	else
		bus->dev.of_node = of_node_get(bus->self->dev.of_node);
}

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	bus->dev.of_node = NULL;
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}
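
/*
 * Example (illustrative sketch, not part of the original file): an
 * architecture that keeps the firmware node in its own host-bridge
 * bookkeeping could provide a non-weak override along these lines.
 * "hypothetical_phb" and its "dn" member are made up for illustration.
 *
 *	struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
 *	{
 *		struct hypothetical_phb *phb = bus->sysdata;
 *
 *		return of_node_get(phb->dn);
 *	}
 */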

struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; descend into it as well.
		 */
		if (!strcmp(node->name, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);
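
/*
 * Example (illustrative sketch): look up the child node for device 3,
 * function 0 below a bridge node "bridge_np" (hypothetical variable).
 * The returned node carries a reference that the caller must drop.
 *
 *	struct device_node *child;
 *
 *	child = of_pci_find_child_device(bridge_np, PCI_DEVFN(3, 0));
 *	if (child) {
 *		pr_info("found %pOF\n", child);
 *		of_node_put(child);
 *	}
 */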

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
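
/*
 * Example (illustrative sketch, "np" is a hypothetical child node of a
 * PCI bridge): decode the reg-encoded address into slot and function.
 *
 *	int devfn = of_pci_get_devfn(np);
 *
 *	if (devfn < 0)
 *		return devfn;
 *	pr_info("%pOF is device %02x function %u\n",
 *		np, PCI_SLOT(devfn), PCI_FUNC(devfn));
 */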

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
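
/*
 * Example (illustrative sketch): read the bridge's bus range, falling
 * back to the full 0x00-0xff range when the property is absent.
 *
 *	struct resource bus_range;
 *
 *	if (of_pci_parse_bus_range(np, &bus_range)) {
 *		bus_range.start = 0;
 *		bus_range.end = 0xff;
 *		bus_range.flags = IORESOURCE_BUS;
 *	}
 */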

/**
 * of_get_pci_domain_nr() - Get host bridge domain number
 * @node: device tree node with the domain information
 *
 * This function will try to obtain the host bridge domain number by finding
 * a property called "linux,pci-domain" of the given device node.
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
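
/*
 * Example (illustrative sketch): use the DT-assigned domain when present;
 * a negative return means the property is missing or malformed.
 *
 *	int domain = of_get_pci_domain_nr(np);
 *
 *	if (domain < 0)
 *		return domain;
 *	pr_info("PCI domain %04x\n", domain);
 */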

/**
 * of_pci_get_max_link_speed() - Get max link speed limit from device tree
 * @node: device tree node with the max link speed information
 *
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
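
/*
 * Example (illustrative sketch): cap the link to the DT-provided PCIe
 * generation; a negative return leaves the hardware default in place.
 *
 *	int max_gen = of_pci_get_max_link_speed(np);
 *
 *	if (max_gen > 0)
 *		pr_info("limiting link to Gen%d\n", max_gen);
 */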

/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *                           is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
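
/*
 * Example (illustrative sketch): honour /chosen/linux,pci-probe-only in
 * a host bridge driver before enumerating the bus.
 *
 *	of_pci_check_probe_only();
 *	if (pci_has_flag(PCI_PROBE_ONLY))
 *		pr_info("keeping firmware-assigned resources\n");
 */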

#if defined(CONFIG_OF_ADDRESS)
/**
 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
 *                                           host bridge resources from DT
 * @dev: host bridge device
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 * address for the start of the I/O range. Can be NULL if the caller doesn't
 * expect I/O ranges to be present in the device tree.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
int devm_of_pci_get_host_bridge_resources(struct device *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources, resource_size_t *io_base)
{
	struct device_node *dev_node = dev->of_node;
	struct resource *res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	char range_type[4];
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);

	err = of_pci_parse_bus_range(dev_node, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		dev_info(dev, "  No bus range found for %pOF, using %pR\n",
			 dev_node, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev_node);
	if (err)
		goto failed;

	dev_dbg(dev, "Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			snprintf(range_type, 4, " IO");
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			snprintf(range_type, 4, "MEM");
		else
			snprintf(range_type, 4, "err");
		dev_info(dev, "  %s %#010llx..%#010llx -> %#010llx\n",
			 range_type, range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		err = of_pci_range_to_resource(&range, dev_node, res);
		if (err) {
			devm_kfree(dev, res);
			continue;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev_node);
				err = -EINVAL;
				goto failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					 dev_node);
			*io_base = range.cpu_addr;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	return 0;

failed:
	pci_free_resource_list(resources);
	return err;
}
EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
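
/*
 * Example (illustrative sketch, inside a hypothetical host controller
 * probe routine): collect the bridge windows described by "ranges".
 *
 *	LIST_HEAD(resources);
 *	resource_size_t io_base;
 *	int err;
 *
 *	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
 *						    &resources, &io_base);
 *	if (err)
 *		return err;
 */
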
#endif /* CONFIG_OF_ADDRESS */

/**
 * of_pci_map_rid - Translate a requester ID through a downstream mapping.
 * @np: root complex device node.
 * @rid: PCI requester ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a PCI requester ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_pci_map_rid(struct device_node *np, u32 rid,
		   const char *map_name, const char *map_mask_name,
		   struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = rid;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
		np, map_name, rid, target && *target ? *target : NULL);
	return -EFAULT;
}
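
/*
 * Example (illustrative sketch): with an msi-map entry of
 * <rid-base 0x0, phandle, out-base 0x10000, length 0x10000> and the
 * default all-ones mask, requester ID 0x0023 (bus 0, devfn 0x23) maps
 * to 0x0023 - 0x0 + 0x10000 = 0x10023.
 *
 *	u32 sid;
 *
 *	if (!of_pci_map_rid(np, 0x0023, "msi-map", "msi-map-mask",
 *			    NULL, &sid))
 *		pr_info("requester 0x0023 -> output ID 0x%x\n", sid);
 */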

#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev:       the device whose interrupt is to be resolved
 * @out_irq:    structure of_phandle_args filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device-node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node; if yes, fall back to standard
	 * device tree parsing
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * Ok, we don't, time to have fun. Let's start by building up an
	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
	 * for PCI. If you do it differently, then don't use this routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Now we walk up the PCI tree */
	for (;;) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for host bridge? Give up */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the linux device to be used for
		 * resolution. Note that we use the linux bus number which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (i.e. you ship firmware that
		 * doesn't create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}
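
/*
 * Worked example for the swizzling above (illustrative): a device in
 * slot 3 behind a node-less P2P bridge that asserts INTA (pin 1) shows
 * up at the bridge as INTD, since pci_swizzle_interrupt_pin() computes
 * (((1 - 1) + 3) % 4) + 1 = 4.
 */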

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the function so that this
 * function can be used directly as the map_irq callback to
 * pci_assign_irq() and as the struct pci_host_bridge.map_irq pointer.
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
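
/*
 * Example (illustrative sketch): a DT-based host controller driver
 * ("bridge" is a hypothetical struct pci_host_bridge pointer) typically
 * wires this up before scanning the bus.
 *
 *	bridge->map_irq = of_irq_parse_and_map_pci;
 *	bridge->swizzle_irq = pci_common_swizzle;
 */
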
#endif	/* CONFIG_OF_IRQ */

int pci_parse_request_of_pci_ranges(struct device *dev,
				    struct list_head *resources,
				    struct resource **bus_range)
{
	int err, res_valid = 0;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(resources);
	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
						    &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, resources);
	if (err)
		goto out_release_res;

	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			if (bus_range)
				*bus_range = res;
			break;
		}
	}

	if (res_valid)
		return 0;

	dev_err(dev, "non-prefetchable memory resource required\n");
	err = -EINVAL;

 out_release_res:
	pci_free_resource_list(resources);
	return err;
}
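
/*
 * Example (illustrative sketch, inside a hypothetical host controller
 * probe routine): parse, request and map all bridge windows in one call.
 *
 *	LIST_HEAD(resources);
 *	struct resource *bus_range;
 *	int err;
 *
 *	err = pci_parse_request_of_pci_ranges(dev, &resources, &bus_range);
 *	if (err)
 *		return err;
 */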