xref: /openbmc/linux/drivers/of/address.c (revision 34facb04)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt)	"OF: " fmt
3 
4 #include <linux/device.h>
5 #include <linux/fwnode.h>
6 #include <linux/io.h>
7 #include <linux/ioport.h>
8 #include <linux/logic_pio.h>
9 #include <linux/module.h>
10 #include <linux/of_address.h>
11 #include <linux/pci.h>
12 #include <linux/pci_regs.h>
13 #include <linux/sizes.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16 
17 #include "of_private.h"
18 
19 /* Maximum number of address cells we deal with */
20 #define OF_MAX_ADDR_CELLS	4
21 #define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
22 #define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
23 
24 static struct of_bus *of_match_bus(struct device_node *np);
25 static int __of_address_to_resource(struct device_node *dev,
26 		const __be32 *addrp, u64 size, unsigned int flags,
27 		const char *name, struct resource *r);
28 
29 /* Debug utility */
30 #ifdef DEBUG
31 static void of_dump_addr(const char *s, const __be32 *addr, int na)
32 {
33 	pr_debug("%s", s);
34 	while (na--)
35 		pr_cont(" %08x", be32_to_cpu(*(addr++)));
36 	pr_cont("\n");
37 }
38 #else
39 static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
40 #endif
41 
42 /* Callbacks for bus specific translators */
43 struct of_bus {
44 	const char	*name;
45 	const char	*addresses;
46 	int		(*match)(struct device_node *parent);
47 	void		(*count_cells)(struct device_node *child,
48 				       int *addrc, int *sizec);
49 	u64		(*map)(__be32 *addr, const __be32 *range,
50 				int na, int ns, int pna);
51 	int		(*translate)(__be32 *addr, u64 offset, int na);
52 	unsigned int	(*get_flags)(const __be32 *addr);
53 };
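/*
 * Editor's note: each entry in the of_busses[] array further down provides
 * one instance of this interface; of_match_bus() walks that array and
 * returns the first bus whose match() callback accepts the parent node
 * (the "default" entry has no match() callback and therefore always
 * matches when nothing more specific does).
 */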
54 
55 /*
56  * Default translator (generic bus)
57  */
58 
59 static void of_bus_default_count_cells(struct device_node *dev,
60 				       int *addrc, int *sizec)
61 {
62 	if (addrc)
63 		*addrc = of_n_addr_cells(dev);
64 	if (sizec)
65 		*sizec = of_n_size_cells(dev);
66 }
67 
68 static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
69 		int na, int ns, int pna)
70 {
71 	u64 cp, s, da;
72 
73 	cp = of_read_number(range, na);
74 	s  = of_read_number(range + na + pna, ns);
75 	da = of_read_number(addr, na);
76 
77 	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
78 		 (unsigned long long)cp, (unsigned long long)s,
79 		 (unsigned long long)da);
80 
81 	if (da < cp || da >= (cp + s))
82 		return OF_BAD_ADDR;
83 	return da - cp;
84 }
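/*
 * Worked example (hypothetical values, editor's illustration): with
 * na = 1, ns = 1, pna = 1 and a ranges entry <0x1000 0xe0000000 0x100>,
 * cp = 0x1000 and s = 0x100.  A child address da = 0x1040 lies inside
 * [cp, cp + s), so this function returns da - cp = 0x40, which is later
 * added to the parent base 0xe0000000 by the translate callback.
 */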
85 
86 static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
87 {
88 	u64 a = of_read_number(addr, na);
89 	memset(addr, 0, na * 4);
90 	a += offset;
91 	if (na > 1)
92 		addr[na - 2] = cpu_to_be32(a >> 32);
93 	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);
94 
95 	return 0;
96 }
97 
98 static unsigned int of_bus_default_get_flags(const __be32 *addr)
99 {
100 	return IORESOURCE_MEM;
101 }
102 
103 static unsigned int of_bus_pci_get_flags(const __be32 *addr)
104 {
105 	unsigned int flags = 0;
106 	u32 w = be32_to_cpup(addr);
107 
108 	if (!IS_ENABLED(CONFIG_PCI))
109 		return 0;
110 
111 	switch ((w >> 24) & 0x03) {
112 	case 0x01:
113 		flags |= IORESOURCE_IO;
114 		break;
115 	case 0x02: /* 32 bits */
116 	case 0x03: /* 64 bits */
117 		flags |= IORESOURCE_MEM;
118 		break;
119 	}
120 	if (w & 0x40000000)
121 		flags |= IORESOURCE_PREFETCH;
122 	return flags;
123 }
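/*
 * Example decode (editor's illustration): the first cell of a PCI address
 * is the phys.hi word of the OF PCI bus binding.  Bits 25:24 select the
 * address space (01 = I/O, 10 = 32-bit memory, 11 = 64-bit memory) and
 * bit 30 marks a prefetchable range.  A phys.hi of 0x42000000 therefore
 * yields IORESOURCE_MEM | IORESOURCE_PREFETCH, while 0x01000000 yields
 * IORESOURCE_IO.
 */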
124 
125 #ifdef CONFIG_PCI
126 /*
127  * PCI bus specific translator
128  */
129 
130 static int of_bus_pci_match(struct device_node *np)
131 {
132 	/*
133 	 * "pciex" is PCI Express
134 	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
135 	 * "ht" is HyperTransport
136 	 */
137 	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
138 		of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
139 }
140 
141 static void of_bus_pci_count_cells(struct device_node *np,
142 				   int *addrc, int *sizec)
143 {
144 	if (addrc)
145 		*addrc = 3;
146 	if (sizec)
147 		*sizec = 2;
148 }
149 
150 static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
151 		int pna)
152 {
153 	u64 cp, s, da;
154 	unsigned int af, rf;
155 
156 	af = of_bus_pci_get_flags(addr);
157 	rf = of_bus_pci_get_flags(range);
158 
159 	/* Check address type match */
160 	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
161 		return OF_BAD_ADDR;
162 
163 	/* Read address values, skipping high cell */
164 	cp = of_read_number(range + 1, na - 1);
165 	s  = of_read_number(range + na + pna, ns);
166 	da = of_read_number(addr + 1, na - 1);
167 
168 	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
169 		 (unsigned long long)cp, (unsigned long long)s,
170 		 (unsigned long long)da);
171 
172 	if (da < cp || da >= (cp + s))
173 		return OF_BAD_ADDR;
174 	return da - cp;
175 }
176 
177 static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
178 {
179 	return of_bus_default_translate(addr + 1, offset, na - 1);
180 }
181 
182 const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
183 			unsigned int *flags)
184 {
185 	const __be32 *prop;
186 	unsigned int psize;
187 	struct device_node *parent;
188 	struct of_bus *bus;
189 	int onesize, i, na, ns;
190 
191 	/* Get parent & match bus type */
192 	parent = of_get_parent(dev);
193 	if (parent == NULL)
194 		return NULL;
195 	bus = of_match_bus(parent);
196 	if (strcmp(bus->name, "pci")) {
197 		of_node_put(parent);
198 		return NULL;
199 	}
200 	bus->count_cells(dev, &na, &ns);
201 	of_node_put(parent);
202 	if (!OF_CHECK_ADDR_COUNT(na))
203 		return NULL;
204 
205 	/* Get "reg" or "assigned-addresses" property */
206 	prop = of_get_property(dev, bus->addresses, &psize);
207 	if (prop == NULL)
208 		return NULL;
209 	psize /= 4;
210 
211 	onesize = na + ns;
212 	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
213 		u32 val = be32_to_cpu(prop[0]);
214 		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
215 			if (size)
216 				*size = of_read_number(prop + na, ns);
217 			if (flags)
218 				*flags = bus->get_flags(prop);
219 			return prop;
220 		}
221 	}
222 	return NULL;
223 }
224 EXPORT_SYMBOL(of_get_pci_address);
225 
226 int of_pci_address_to_resource(struct device_node *dev, int bar,
227 			       struct resource *r)
228 {
229 	const __be32	*addrp;
230 	u64		size;
231 	unsigned int	flags;
232 
233 	addrp = of_get_pci_address(dev, bar, &size, &flags);
234 	if (addrp == NULL)
235 		return -EINVAL;
236 	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
237 }
238 EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
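/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * file): a driver with a PCI-style child node could fill a resource for
 * BAR 0 like this, assuming "np" is a valid device_node pointer:
 *
 *	struct resource res;
 *
 *	if (of_pci_address_to_resource(np, 0, &res))
 *		return -ENODEV;
 *	pr_info("BAR0 at %pR\n", &res);
 */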
239 
240 /*
241  * of_pci_range_to_resource - Create a resource from an of_pci_range
242  * @range:	the PCI range that describes the resource
243  * @np:		device node that the range belongs to
244  * @res:	pointer to a valid resource that will be updated to
245  *              reflect the values contained in the range.
246  *
247  * Returns -EINVAL if the range cannot be converted to a resource.
248  *
249  * Note that if the range is an IO range, the resource will be converted
250  * using pci_address_to_pio() which can fail if it is called too early or
251  * if the range cannot be matched to any host bridge IO space (our case here).
252  * To guard against that we try to register the IO range first.
253  * If that fails we know that pci_address_to_pio() will do too.
254  */
255 int of_pci_range_to_resource(struct of_pci_range *range,
256 			     struct device_node *np, struct resource *res)
257 {
258 	int err;
259 	res->flags = range->flags;
260 	res->parent = res->child = res->sibling = NULL;
261 	res->name = np->full_name;
262 
263 	if (res->flags & IORESOURCE_IO) {
264 		unsigned long port;
265 		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
266 				range->size);
267 		if (err)
268 			goto invalid_range;
269 		port = pci_address_to_pio(range->cpu_addr);
270 		if (port == (unsigned long)-1) {
271 			err = -EINVAL;
272 			goto invalid_range;
273 		}
274 		res->start = port;
275 	} else {
276 		if ((sizeof(resource_size_t) < 8) &&
277 		    upper_32_bits(range->cpu_addr)) {
278 			err = -EINVAL;
279 			goto invalid_range;
280 		}
281 
282 		res->start = range->cpu_addr;
283 	}
284 	res->end = res->start + range->size - 1;
285 	return 0;
286 
287 invalid_range:
288 	res->start = (resource_size_t)OF_BAD_ADDR;
289 	res->end = (resource_size_t)OF_BAD_ADDR;
290 	return err;
291 }
292 EXPORT_SYMBOL(of_pci_range_to_resource);
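/*
 * Usage sketch (editor's illustration): host bridge drivers typically walk
 * the "ranges" property with the parser helpers defined later in this file
 * and convert each entry, e.g. (assuming "np" points at the bridge node):
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *	struct resource res;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -EINVAL;
 *	for_each_of_pci_range(&parser, &range)
 *		if (!of_pci_range_to_resource(&range, np, &res))
 *			pr_info("bridge window %pR\n", &res);
 */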
293 #endif /* CONFIG_PCI */
294 
295 /*
296  * ISA bus specific translator
297  */
298 
299 static int of_bus_isa_match(struct device_node *np)
300 {
301 	return of_node_name_eq(np, "isa");
302 }
303 
304 static void of_bus_isa_count_cells(struct device_node *child,
305 				   int *addrc, int *sizec)
306 {
307 	if (addrc)
308 		*addrc = 2;
309 	if (sizec)
310 		*sizec = 1;
311 }
312 
313 static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
314 		int pna)
315 {
316 	u64 cp, s, da;
317 
318 	/* Check address type match */
319 	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
320 		return OF_BAD_ADDR;
321 
322 	/* Read address values, skipping high cell */
323 	cp = of_read_number(range + 1, na - 1);
324 	s  = of_read_number(range + na + pna, ns);
325 	da = of_read_number(addr + 1, na - 1);
326 
327 	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
328 		 (unsigned long long)cp, (unsigned long long)s,
329 		 (unsigned long long)da);
330 
331 	if (da < cp || da >= (cp + s))
332 		return OF_BAD_ADDR;
333 	return da - cp;
334 }
335 
336 static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
337 {
338 	return of_bus_default_translate(addr + 1, offset, na - 1);
339 }
340 
341 static unsigned int of_bus_isa_get_flags(const __be32 *addr)
342 {
343 	unsigned int flags = 0;
344 	u32 w = be32_to_cpup(addr);
345 
346 	if (w & 1)
347 		flags |= IORESOURCE_IO;
348 	else
349 		flags |= IORESOURCE_MEM;
350 	return flags;
351 }
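/*
 * Example decode (editor's illustration): ISA "reg" entries use two address
 * cells; bit 0 of the first cell selects the space, so <1 0x3f8 0x8> is an
 * 8-byte I/O range at port 0x3f8 (IORESOURCE_IO) and <0 0xd0000 0x8000> is
 * a memory range (IORESOURCE_MEM).
 */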
352 
353 /*
354  * Array of bus specific translators
355  */
356 
357 static struct of_bus of_busses[] = {
358 #ifdef CONFIG_PCI
359 	/* PCI */
360 	{
361 		.name = "pci",
362 		.addresses = "assigned-addresses",
363 		.match = of_bus_pci_match,
364 		.count_cells = of_bus_pci_count_cells,
365 		.map = of_bus_pci_map,
366 		.translate = of_bus_pci_translate,
367 		.get_flags = of_bus_pci_get_flags,
368 	},
369 #endif /* CONFIG_PCI */
370 	/* ISA */
371 	{
372 		.name = "isa",
373 		.addresses = "reg",
374 		.match = of_bus_isa_match,
375 		.count_cells = of_bus_isa_count_cells,
376 		.map = of_bus_isa_map,
377 		.translate = of_bus_isa_translate,
378 		.get_flags = of_bus_isa_get_flags,
379 	},
380 	/* Default */
381 	{
382 		.name = "default",
383 		.addresses = "reg",
384 		.match = NULL,
385 		.count_cells = of_bus_default_count_cells,
386 		.map = of_bus_default_map,
387 		.translate = of_bus_default_translate,
388 		.get_flags = of_bus_default_get_flags,
389 	},
390 };
391 
392 static struct of_bus *of_match_bus(struct device_node *np)
393 {
394 	int i;
395 
396 	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
397 		if (!of_busses[i].match || of_busses[i].match(np))
398 			return &of_busses[i];
399 	BUG();
400 	return NULL;
401 }
402 
403 static int of_empty_ranges_quirk(struct device_node *np)
404 {
405 	if (IS_ENABLED(CONFIG_PPC)) {
406 		/* To save cycles, we cache the result for global "Mac" setting */
407 		static int quirk_state = -1;
408 
409 		/* PA-SEMI sdc DT bug */
410 		if (of_device_is_compatible(np, "1682m-sdc"))
411 			return true;
412 
413 		/* Make quirk cached */
414 		if (quirk_state < 0)
415 			quirk_state =
416 				of_machine_is_compatible("Power Macintosh") ||
417 				of_machine_is_compatible("MacRISC");
418 		return quirk_state;
419 	}
420 	return false;
421 }
422 
423 static int of_translate_one(struct device_node *parent, struct of_bus *bus,
424 			    struct of_bus *pbus, __be32 *addr,
425 			    int na, int ns, int pna, const char *rprop)
426 {
427 	const __be32 *ranges;
428 	unsigned int rlen;
429 	int rone;
430 	u64 offset = OF_BAD_ADDR;
431 
432 	/*
433 	 * Normally, an absence of a "ranges" property means we are
434 	 * crossing a non-translatable boundary, and thus the addresses
435 	 * below the current node cannot be converted to CPU physical ones.
436 	 * Unfortunately, while this is very clear in the spec, it's not
437 	 * what Apple understood, and they do have things like /uni-n or
438 	 * /ht nodes with no "ranges" property and a lot of perfectly
439 	 * usable mapped devices below them. Thus we treat the absence of
440 	 * "ranges" as equivalent to an empty "ranges" property which means
441 	 * a 1:1 translation at that level. It's up to the caller not to try
442 	 * to translate addresses that aren't supposed to be translated in
443 	 * the first place. --BenH.
444 	 *
445 	 * As far as we know, this damage only exists on Apple machines, so
446 	 * this code is only enabled on powerpc. --gcl
447 	 *
448 	 * This quirk also applies for 'dma-ranges' which frequently exist in
449 	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
450 	 */
451 	ranges = of_get_property(parent, rprop, &rlen);
452 	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
453 	    strcmp(rprop, "dma-ranges")) {
454 		pr_debug("no ranges; cannot translate\n");
455 		return 1;
456 	}
457 	if (ranges == NULL || rlen == 0) {
458 		offset = of_read_number(addr, na);
459 		memset(addr, 0, pna * 4);
460 		pr_debug("empty ranges; 1:1 translation\n");
461 		goto finish;
462 	}
463 
464 	pr_debug("walking ranges...\n");
465 
466 	/* Now walk through the ranges */
467 	rlen /= 4;
468 	rone = na + pna + ns;
469 	for (; rlen >= rone; rlen -= rone, ranges += rone) {
470 		offset = bus->map(addr, ranges, na, ns, pna);
471 		if (offset != OF_BAD_ADDR)
472 			break;
473 	}
474 	if (offset == OF_BAD_ADDR) {
475 		pr_debug("not found!\n");
476 		return 1;
477 	}
478 	memcpy(addr, ranges + na, 4 * pna);
479 
480  finish:
481 	of_dump_addr("parent translation for:", addr, pna);
482 	pr_debug("with offset: %llx\n", (unsigned long long)offset);
483 
484 	/* Translate it into parent bus space */
485 	return pbus->translate(addr, offset, pna);
486 }
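/*
 * Worked example (hypothetical values, editor's illustration): a parent
 * node with ranges = <0x0 0xe0000000 0x100000> (na = ns = pna = 1) maps
 * child address 0x1000 as follows: bus->map() returns offset 0x1000, the
 * parent base 0xe0000000 is copied into addr, and pbus->translate() adds
 * the offset, leaving 0xe0001000 in parent bus space.
 */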
487 
488 /*
489  * Translate an address from the device-tree into a CPU physical address,
490  * this walks up the tree and applies the various bus mappings on the
491  * way.
492  *
493  * Note: We consider that crossing any level with #size-cells == 0 means
494  * that translation is impossible (that is, we are not dealing with a value
495  * that can be mapped to a CPU physical address). This is not really specified
496  * that way, but this is traditionally the way IBM at least does things.
497  *
498  * Whenever the translation fails, the *host pointer will be set to the
499  * device node that registered the logical PIO mapping, and the return code
500  * is relative to that node.
501  */
502 static u64 __of_translate_address(struct device_node *dev,
503 				  struct device_node *(*get_parent)(const struct device_node *),
504 				  const __be32 *in_addr, const char *rprop,
505 				  struct device_node **host)
506 {
507 	struct device_node *parent = NULL;
508 	struct of_bus *bus, *pbus;
509 	__be32 addr[OF_MAX_ADDR_CELLS];
510 	int na, ns, pna, pns;
511 	u64 result = OF_BAD_ADDR;
512 
513 	pr_debug("** translation for device %pOF **\n", dev);
514 
515 	/* Increase refcount at current level */
516 	of_node_get(dev);
517 
518 	*host = NULL;
519 	/* Get parent & match bus type */
520 	parent = get_parent(dev);
521 	if (parent == NULL)
522 		goto bail;
523 	bus = of_match_bus(parent);
524 
525 	/* Count address cells & copy address locally */
526 	bus->count_cells(dev, &na, &ns);
527 	if (!OF_CHECK_COUNTS(na, ns)) {
528 		pr_debug("Bad cell count for %pOF\n", dev);
529 		goto bail;
530 	}
531 	memcpy(addr, in_addr, na * 4);
532 
533 	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
534 	    bus->name, na, ns, parent);
535 	of_dump_addr("translating address:", addr, na);
536 
537 	/* Translate */
538 	for (;;) {
539 		struct logic_pio_hwaddr *iorange;
540 
541 		/* Switch to parent bus */
542 		of_node_put(dev);
543 		dev = parent;
544 		parent = get_parent(dev);
545 
546 		/* If root, we have finished */
547 		if (parent == NULL) {
548 			pr_debug("reached root node\n");
549 			result = of_read_number(addr, na);
550 			break;
551 		}
552 
553 		/*
554 		 * For indirectIO device which has no ranges property, get
555 		 * the address from reg directly.
556 		 */
557 		iorange = find_io_range_by_fwnode(&dev->fwnode);
558 		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
559 			result = of_read_number(addr + 1, na - 1);
560 			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
561 				 dev, result);
562 			*host = of_node_get(dev);
563 			break;
564 		}
565 
566 		/* Get new parent bus and counts */
567 		pbus = of_match_bus(parent);
568 		pbus->count_cells(dev, &pna, &pns);
569 		if (!OF_CHECK_COUNTS(pna, pns)) {
570 			pr_err("Bad cell count for %pOF\n", dev);
571 			break;
572 		}
573 
574 		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
575 		    pbus->name, pna, pns, parent);
576 
577 		/* Apply bus translation */
578 		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
579 			break;
580 
581 		/* Complete the move up one level */
582 		na = pna;
583 		ns = pns;
584 		bus = pbus;
585 
586 		of_dump_addr("one level translation:", addr, na);
587 	}
588  bail:
589 	of_node_put(parent);
590 	of_node_put(dev);
591 
592 	return result;
593 }
594 
595 u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
596 {
597 	struct device_node *host;
598 	u64 ret;
599 
600 	ret = __of_translate_address(dev, of_get_parent,
601 				     in_addr, "ranges", &host);
602 	if (host) {
603 		of_node_put(host);
604 		return OF_BAD_ADDR;
605 	}
606 
607 	return ret;
608 }
609 EXPORT_SYMBOL(of_translate_address);
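/*
 * Usage sketch (editor's illustration): translating the first "reg" entry
 * of a node into a CPU physical address, assuming "np" is a valid node:
 *
 *	const __be32 *reg = of_get_address(np, 0, NULL, NULL);
 *	u64 paddr;
 *
 *	if (!reg)
 *		return -EINVAL;
 *	paddr = of_translate_address(np, reg);
 *	if (paddr == OF_BAD_ADDR)
 *		return -EINVAL;
 */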
610 
611 static struct device_node *__of_get_dma_parent(const struct device_node *np)
612 {
613 	struct of_phandle_args args;
614 	int ret, index;
615 
616 	index = of_property_match_string(np, "interconnect-names", "dma-mem");
617 	if (index < 0)
618 		return of_get_parent(np);
619 
620 	ret = of_parse_phandle_with_args(np, "interconnects",
621 					 "#interconnect-cells",
622 					 index, &args);
623 	if (ret < 0)
624 		return of_get_parent(np);
625 
626 	return of_node_get(args.np);
627 }
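/*
 * Example binding (editor's illustration): a node whose DMA master port
 * goes through an interconnect rather than its natural parent would carry
 * something like
 *
 *	interconnects = <&icc_bus 0>;
 *	interconnect-names = "dma-mem";
 *
 * in which case the phandle target (&icc_bus here, a hypothetical label)
 * is used as the parent for "dma-ranges" translation instead of the
 * node's tree parent.
 */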
628 
629 static struct device_node *of_get_next_dma_parent(struct device_node *np)
630 {
631 	struct device_node *parent;
632 
633 	parent = __of_get_dma_parent(np);
634 	of_node_put(np);
635 
636 	return parent;
637 }
638 
639 u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
640 {
641 	struct device_node *host;
642 	u64 ret;
643 
644 	ret = __of_translate_address(dev, __of_get_dma_parent,
645 				     in_addr, "dma-ranges", &host);
646 
647 	if (host) {
648 		of_node_put(host);
649 		return OF_BAD_ADDR;
650 	}
651 
652 	return ret;
653 }
654 EXPORT_SYMBOL(of_translate_dma_address);
655 
656 const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
657 		    unsigned int *flags)
658 {
659 	const __be32 *prop;
660 	unsigned int psize;
661 	struct device_node *parent;
662 	struct of_bus *bus;
663 	int onesize, i, na, ns;
664 
665 	/* Get parent & match bus type */
666 	parent = of_get_parent(dev);
667 	if (parent == NULL)
668 		return NULL;
669 	bus = of_match_bus(parent);
670 	bus->count_cells(dev, &na, &ns);
671 	of_node_put(parent);
672 	if (!OF_CHECK_ADDR_COUNT(na))
673 		return NULL;
674 
675 	/* Get "reg" or "assigned-addresses" property */
676 	prop = of_get_property(dev, bus->addresses, &psize);
677 	if (prop == NULL)
678 		return NULL;
679 	psize /= 4;
680 
681 	onesize = na + ns;
682 	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
683 		if (i == index) {
684 			if (size)
685 				*size = of_read_number(prop + na, ns);
686 			if (flags)
687 				*flags = bus->get_flags(prop);
688 			return prop;
689 		}
690 	return NULL;
691 }
692 EXPORT_SYMBOL(of_get_address);
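/*
 * Usage sketch (editor's illustration): reading the size and flags of the
 * second "reg" entry without translating it, assuming "np" is valid:
 *
 *	u64 size;
 *	unsigned int flags;
 *	const __be32 *addrp = of_get_address(np, 1, &size, &flags);
 *
 *	if (addrp && (flags & IORESOURCE_MEM))
 *		pr_debug("reg[1]: %llu bytes of MMIO\n", size);
 */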
693 
694 static int parser_init(struct of_pci_range_parser *parser,
695 			struct device_node *node, const char *name)
696 {
697 	int rlen;
698 
699 	parser->node = node;
700 	parser->pna = of_n_addr_cells(node);
701 	parser->na = of_bus_n_addr_cells(node);
702 	parser->ns = of_bus_n_size_cells(node);
703 	parser->dma = !strcmp(name, "dma-ranges");
704 
705 	parser->range = of_get_property(node, name, &rlen);
706 	if (parser->range == NULL)
707 		return -ENOENT;
708 
709 	parser->end = parser->range + rlen / sizeof(__be32);
710 
711 	return 0;
712 }
713 
714 int of_pci_range_parser_init(struct of_pci_range_parser *parser,
715 				struct device_node *node)
716 {
717 	return parser_init(parser, node, "ranges");
718 }
719 EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
720 
721 int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
722 				struct device_node *node)
723 {
724 	return parser_init(parser, node, "dma-ranges");
725 }
726 EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
727 #define of_dma_range_parser_init of_pci_dma_range_parser_init
728 
729 struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
730 						struct of_pci_range *range)
731 {
732 	int na = parser->na;
733 	int ns = parser->ns;
734 	int np = parser->pna + na + ns;
735 
736 	if (!range)
737 		return NULL;
738 
739 	if (!parser->range || parser->range + np > parser->end)
740 		return NULL;
741 
742 	if (parser->na == 3)
743 		range->flags = of_bus_pci_get_flags(parser->range);
744 	else
745 		range->flags = 0;
746 
747 	range->pci_addr = of_read_number(parser->range, na);
748 
749 	if (parser->dma)
750 		range->cpu_addr = of_translate_dma_address(parser->node,
751 				parser->range + na);
752 	else
753 		range->cpu_addr = of_translate_address(parser->node,
754 				parser->range + na);
755 	range->size = of_read_number(parser->range + parser->pna + na, ns);
756 
757 	parser->range += np;
758 
759 	/* Now consume following elements while they are contiguous */
760 	while (parser->range + np <= parser->end) {
761 		u32 flags = 0;
762 		u64 pci_addr, cpu_addr, size;
763 
764 		if (parser->na == 3)
765 			flags = of_bus_pci_get_flags(parser->range);
766 		pci_addr = of_read_number(parser->range, na);
767 		if (parser->dma)
768 			cpu_addr = of_translate_dma_address(parser->node,
769 					parser->range + na);
770 		else
771 			cpu_addr = of_translate_address(parser->node,
772 					parser->range + na);
773 		size = of_read_number(parser->range + parser->pna + na, ns);
774 
775 		if (flags != range->flags)
776 			break;
777 		if (pci_addr != range->pci_addr + range->size ||
778 		    cpu_addr != range->cpu_addr + range->size)
779 			break;
780 
781 		range->size += size;
782 		parser->range += np;
783 	}
784 
785 	return range;
786 }
787 EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
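/*
 * Usage sketch (editor's illustration): the same iterator also walks
 * "dma-ranges", e.g. to print each inbound window of a bus node "np":
 *
 *	struct of_range_parser parser;
 *	struct of_range range;
 *
 *	if (of_pci_dma_range_parser_init(&parser, np))
 *		return -ENOENT;
 *	for_each_of_range(&parser, &range)
 *		pr_debug("bus %llx -> cpu %llx, size %llx\n",
 *			 range.bus_addr, range.cpu_addr, range.size);
 */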
788 
789 static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
790 			u64 size)
791 {
792 	u64 taddr;
793 	unsigned long port;
794 	struct device_node *host;
795 
796 	taddr = __of_translate_address(dev, of_get_parent,
797 				       in_addr, "ranges", &host);
798 	if (host) {
799 		/* host-specific port access */
800 		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
801 		of_node_put(host);
802 	} else {
803 		/* memory-mapped I/O range */
804 		port = pci_address_to_pio(taddr);
805 	}
806 
807 	if (port == (unsigned long)-1)
808 		return OF_BAD_ADDR;
809 
810 	return port;
811 }
812 
813 static int __of_address_to_resource(struct device_node *dev,
814 		const __be32 *addrp, u64 size, unsigned int flags,
815 		const char *name, struct resource *r)
816 {
817 	u64 taddr;
818 
819 	if (flags & IORESOURCE_MEM)
820 		taddr = of_translate_address(dev, addrp);
821 	else if (flags & IORESOURCE_IO)
822 		taddr = of_translate_ioport(dev, addrp, size);
823 	else
824 		return -EINVAL;
825 
826 	if (taddr == OF_BAD_ADDR)
827 		return -EINVAL;
828 	memset(r, 0, sizeof(struct resource));
829 
830 	r->start = taddr;
831 	r->end = taddr + size - 1;
832 	r->flags = flags;
833 	r->name = name ? name : dev->full_name;
834 
835 	return 0;
836 }
837 
838 /**
839  * of_address_to_resource - Translate device tree address and return as resource
840  *
841  * Note that if your address is a PIO address, the conversion will fail if
842  * the physical address can't be internally converted to an IO token with
843  * pci_address_to_pio(); that happens when it is either called too early or
844  * the address can't be matched to any host bridge IO space.
845  */
846 int of_address_to_resource(struct device_node *dev, int index,
847 			   struct resource *r)
848 {
849 	const __be32	*addrp;
850 	u64		size;
851 	unsigned int	flags;
852 	const char	*name = NULL;
853 
854 	addrp = of_get_address(dev, index, &size, &flags);
855 	if (addrp == NULL)
856 		return -EINVAL;
857 
858 	/* Get optional "reg-names" property to add a name to a resource */
859 	of_property_read_string_index(dev, "reg-names",	index, &name);
860 
861 	return __of_address_to_resource(dev, addrp, size, flags, name, r);
862 }
863 EXPORT_SYMBOL_GPL(of_address_to_resource);
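/*
 * Usage sketch (editor's illustration): the common probe-time pattern,
 * assuming "np" is the device's node:
 *
 *	struct resource res;
 *
 *	if (of_address_to_resource(np, 0, &res))
 *		return -ENODEV;
 *	pr_debug("registers at %pR\n", &res);
 */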
864 
865 /**
866  * of_iomap - Maps the memory mapped IO for a given device_node
867  * @np:		the device node whose I/O range will be mapped
868  * @index:	index of the I/O range
869  *
870  * Returns a pointer to the mapped memory, or NULL on failure.
871  */
872 void __iomem *of_iomap(struct device_node *np, int index)
873 {
874 	struct resource res;
875 
876 	if (of_address_to_resource(np, index, &res))
877 		return NULL;
878 
879 	return ioremap(res.start, resource_size(&res));
880 }
881 EXPORT_SYMBOL(of_iomap);
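/*
 * Usage sketch (editor's illustration): map and later unmap index 0,
 * assuming "np" is a valid node:
 *
 *	void __iomem *base = of_iomap(np, 0);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	...
 *	iounmap(base);
 */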
882 
883 /*
884  * of_io_request_and_map - Requests a resource and maps the memory mapped IO
885  *			   for a given device_node
886  * @np:		the device node whose I/O range will be mapped
887  * @index:	index of the I/O range
888  * @name:	name "override" for the memory region request or NULL
889  *
890  * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
891  * error code on failure. Usage example:
892  *
893  *	base = of_io_request_and_map(node, 0, "foo");
894  *	if (IS_ERR(base))
895  *		return PTR_ERR(base);
896  */
897 void __iomem *of_io_request_and_map(struct device_node *np, int index,
898 				    const char *name)
899 {
900 	struct resource res;
901 	void __iomem *mem;
902 
903 	if (of_address_to_resource(np, index, &res))
904 		return IOMEM_ERR_PTR(-EINVAL);
905 
906 	if (!name)
907 		name = res.name;
908 	if (!request_mem_region(res.start, resource_size(&res), name))
909 		return IOMEM_ERR_PTR(-EBUSY);
910 
911 	mem = ioremap(res.start, resource_size(&res));
912 	if (!mem) {
913 		release_mem_region(res.start, resource_size(&res));
914 		return IOMEM_ERR_PTR(-ENOMEM);
915 	}
916 
917 	return mem;
918 }
919 EXPORT_SYMBOL(of_io_request_and_map);
920 
921 /**
922  * of_dma_get_range - Get DMA range info
923  * @np:		device node to get DMA range info
924  * @dma_addr:	pointer to store initial DMA address of DMA range
925  * @paddr:	pointer to store initial CPU address of DMA range
926  * @size:	pointer to store size of DMA range
927  *
928  * Look in bottom up direction for the first "dma-ranges" property
929  * and parse it.
930  *  dma-ranges format:
931  *	DMA addr (dma_addr)	: naddr cells
932  *	CPU addr (phys_addr_t)	: pna cells
933  *	size			: nsize cells
934  *
935  * It returns -ENODEV if no "dma-ranges" property was found
936  * for this device in the DT.
937  */
938 int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
939 {
940 	struct device_node *node = of_node_get(np);
941 	const __be32 *ranges = NULL;
942 	int len;
943 	int ret = 0;
944 	bool found_dma_ranges = false;
945 	struct of_range_parser parser;
946 	struct of_range range;
947 	u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
948 
949 	while (node) {
950 		ranges = of_get_property(node, "dma-ranges", &len);
951 
952 		/* Ignore empty ranges, they imply no translation required */
953 		if (ranges && len > 0)
954 			break;
955 
956 		/* Once we find 'dma-ranges', then a missing one is an error */
957 		if (found_dma_ranges && !ranges) {
958 			ret = -ENODEV;
959 			goto out;
960 		}
961 		found_dma_ranges = true;
962 
963 		node = of_get_next_dma_parent(node);
964 	}
965 
966 	if (!node || !ranges) {
967 		pr_debug("no dma-ranges found for node(%pOF)\n", np);
968 		ret = -ENODEV;
969 		goto out;
970 	}
971 
972 	of_dma_range_parser_init(&parser, node);
973 
974 	for_each_of_range(&parser, &range) {
975 		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
976 			 range.bus_addr, range.cpu_addr, range.size);
977 
978 		if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
979 			pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
980 			/* Don't error out as we'd break some existing DTs */
981 			continue;
982 		}
983 		dma_offset = range.cpu_addr - range.bus_addr;
984 
985 		/* Take lower and upper limits */
986 		if (range.bus_addr < dma_start)
987 			dma_start = range.bus_addr;
988 		if (range.bus_addr + range.size > dma_end)
989 			dma_end = range.bus_addr + range.size;
990 	}
991 
992 	if (dma_start >= dma_end) {
993 		ret = -EINVAL;
994 		pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
995 			 node);
996 		goto out;
997 	}
998 
999 	*dma_addr = dma_start;
1000 	*size = dma_end - dma_start;
1001 	*paddr = dma_start + dma_offset;
1002 
1003 	pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
1004 		 *dma_addr, *paddr, *size);
1005 
1006 out:
1007 	of_node_put(node);
1008 
1009 	return ret;
1010 }
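/*
 * Worked example (hypothetical values, editor's illustration): a bus node
 * carrying
 *
 *	dma-ranges = <0x10000000 0x0 0x40000000 0x40000000>;
 *
 * with one bus address cell, two parent (CPU) address cells and one size
 * cell describes a 1 GiB window where bus address 0x10000000 corresponds
 * to CPU address 0x40000000 (assuming the bus node sits directly under the
 * root so the CPU side translates 1:1).  For a child of that bus this
 * function would return *dma_addr = 0x10000000, *paddr = 0x40000000 and
 * *size = 0x40000000.
 */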
1011 
1012 /**
1013  * of_dma_is_coherent - Check if device is coherent
1014  * @np:	device node
1015  *
1016  * It returns true if the "dma-coherent" property was found
1017  * for this device in the DT, or if DMA is coherent by
1018  * default for OF devices on the current platform.
1019  */
1020 bool of_dma_is_coherent(struct device_node *np)
1021 {
1022 	struct device_node *node;
1023 
1024 	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
1025 		return true;
1026 	node = of_node_get(np);
1027 	while (node) {
1028 		if (of_property_read_bool(node, "dma-coherent")) {
1029 			of_node_put(node);
1030 			return true;
1031 		}
1032 		node = of_get_next_dma_parent(node);
1033 	}
1034 	of_node_put(node);
1035 	return false;
1036 }
1037 EXPORT_SYMBOL_GPL(of_dma_is_coherent);
1038