#define pr_fmt(fmt)	"OF: " fmt

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)

static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r);

/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif

/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	unsigned int	(*get_flags)(const __be32 *addr);
};

/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}

static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
			      int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);

	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}

#ifdef CONFIG_OF_ADDRESS_PCI
/*
 * PCI bus specific translator
 */

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 */
	return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
		!strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}
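/*
 * Illustrative reminder (values made up, summarising the standard OF PCI
 * binding rather than anything defined in this file): the first ("phys.hi")
 * cell of a PCI address is laid out as npt000ss bbbbbbbb dddddfff rrrrrrrr,
 * where "ss" selects the space (01 = I/O, 10 = 32-bit memory, 11 = 64-bit
 * memory), "p" marks a prefetchable range and "n" a non-relocatable one.
 * For example, a phys.hi cell of 0x42010010 describes prefetchable 32-bit
 * memory behind bus 0x01, device 0, function 0, BAR at config offset 0x10,
 * which of_bus_pci_get_flags() below decodes as
 * IORESOURCE_MEM | IORESOURCE_PREFETCH.
 */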
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	switch ((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}
#endif /* CONFIG_OF_ADDRESS_PCI */

#ifdef CONFIG_PCI
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
				 unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci")) {
		of_node_put(parent);
		return NULL;
	}
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);

		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_pci_address);

int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;

	addrp = of_get_pci_address(dev, bar, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;
	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);

static int parser_init(struct of_pci_range_parser *parser,
		       struct device_node *node, const char *name)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, name, &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);

int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				 struct device_node *node)
{
	return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
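/*
 * A PCI "ranges" entry is (3 PCI address cells, #address-cells of the parent
 * CPU address cells, 2 size cells). As an illustrative example (node layout
 * and values made up), a host bridge with
 *
 *	ranges = <0x02000000 0x0 0x40000000  0x40000000  0x0 0x10000000>;
 *
 * on a parent with #address-cells = <1> describes a 256 MiB 32-bit
 * non-prefetchable memory window mapping PCI address 0x40000000 1:1 to CPU
 * address 0x40000000. The parser below walks such entries one at a time and
 * merges contiguous ones.
 */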
struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	const int na = 3, ns = 2;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + parser->np > parser->end)
		return NULL;

	range->pci_space = be32_to_cpup(parser->range);
	range->flags = of_bus_pci_get_flags(parser->range);
	range->pci_addr = of_read_number(parser->range + 1, ns);
	range->cpu_addr = of_translate_address(parser->node,
					       parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += parser->np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + parser->np <= parser->end) {
		u32 flags;
		u64 pci_addr, cpu_addr, size;

		flags = of_bus_pci_get_flags(parser->range);
		pci_addr = of_read_number(parser->range + 1, ns);
		cpu_addr = of_translate_address(parser->node,
						parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (pci_addr != range->pci_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += parser->np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
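/*
 * Typical use (a sketch, not code in this file): a PCI host bridge driver
 * initialises a parser on its node and walks the windows with the
 * for_each_of_pci_range() helper from <linux/of_address.h>, turning each
 * one into a struct resource:
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *	struct resource res;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -EINVAL;
 *	for_each_of_pci_range(&parser, &range) {
 *		if (of_pci_range_to_resource(&range, np, &res))
 *			continue;
 *		... add "res" to the host bridge's window list ...
 *	}
 */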
/**
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node where the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio() which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails we know that pci_address_to_pio() will do too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;

	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;

		err = pci_register_io_range(range->cpu_addr, range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
#endif /* CONFIG_PCI */

/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return !strcmp(np->name, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}

static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}
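/*
 * Illustrative example (values made up): under an "isa" bridge node, a
 * legacy UART is typically described with a two-cell address such as
 *
 *	reg = <1 0x3f8 0x8>;
 *
 * The first cell selects the address space (bit 0 set means I/O, clear
 * means memory), so of_bus_isa_get_flags() above reports IORESOURCE_IO,
 * and of_bus_isa_map() translates only the remaining cell, here port 0x3f8
 * with a length of 8.
 */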
/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_OF_ADDRESS_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_OF_ADDRESS_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};

static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}

static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}

static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	unsigned int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, an absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current node cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property, which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found!\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", (unsigned long long)offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}
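/*
 * Worked example (made-up values): a child with reg = <0x1000 0x100> under
 * a simple bus whose "ranges" property, with a one-cell parent address, is
 *
 *	ranges = <0x0 0x80000000 0x10000>;
 *
 * translates as follows: of_bus_default_map() finds 0x1000 inside the child
 * range starting at 0x0 and returns the offset 0x1000, the parent address
 * 0x80000000 is then copied in, and of_bus_default_translate() adds the
 * offset, yielding 0x80001000 as the address seen one level up.
 */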
/*
 * Translate an address from the device tree into a CPU physical address.
 * This walks up the tree and applies the various bus mappings along the
 * way.
 *
 * Note: We consider crossing any level with #size-cells == 0 to mean that
 * translation is impossible (i.e. we are not dealing with a value that can
 * be mapped to a CPU physical address). This is not really specified that
 * way, but it is traditionally how IBM, at least, does things.
 */
static u64 __of_translate_address(struct device_node *dev,
				  const __be32 *in_addr, const char *rprop)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %pOF **\n", dev);

	/* Increase refcount at current level */
	of_node_get(dev);

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %pOF\n", dev);
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
		 bus->name, na, ns, parent);
	of_dump_addr("translating address:", addr, na);

	/* Translate */
	for (;;) {
		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = of_get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %pOF\n", dev);
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
			 pbus->name, pna, pns, parent);

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}

u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	return __of_translate_address(dev, in_addr, "ranges");
}
EXPORT_SYMBOL(of_translate_address);

u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	return __of_translate_address(dev, in_addr, "dma-ranges");
}
EXPORT_SYMBOL(of_translate_dma_address);

const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
			     unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
		if (i == index) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	return NULL;
}
EXPORT_SYMBOL(of_get_address);
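/*
 * Usage sketch (hypothetical caller, not code in this file): the raw "reg"
 * entry returned by of_get_address() is still in the parent bus's address
 * space; of_translate_address() converts it into a CPU physical address,
 * which is what of_address_to_resource() below combines for you:
 *
 *	u64 size, phys;
 *	unsigned int flags;
 *	const __be32 *reg = of_get_address(np, 0, &size, &flags);
 *
 *	if (reg)
 *		phys = of_translate_address(np, reg);
 */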
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
{
	u64 taddr;

	if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
		return -EINVAL;
	taddr = of_translate_address(dev, addrp);
	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));
	if (flags & IORESOURCE_IO) {
		unsigned long port;

		port = pci_address_to_pio(taddr);
		if (port == (unsigned long)-1)
			return -EINVAL;
		r->start = port;
		r->end = port + size - 1;
	} else {
		r->start = taddr;
		r->end = taddr + size - 1;
	}
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}

/**
 * of_address_to_resource - Translate device tree address and return as resource
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(); that is because it is either called too early or
 * the address can't be matched to any host bridge IO space.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;
	const char *name = NULL;

	addrp = of_get_address(dev, index, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	of_property_read_string_index(dev, "reg-names", index, &name);

	return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);

struct device_node *of_find_matching_node_by_address(struct device_node *from,
					const struct of_device_id *matches,
					u64 base_address)
{
	struct device_node *dn = of_find_matching_node(from, matches);
	struct resource res;

	while (dn) {
		if (!of_address_to_resource(dn, 0, &res) &&
		    res.start == base_address)
			return dn;

		dn = of_find_matching_node(dn, matches);
	}

	return NULL;
}
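/*
 * Usage sketch (hypothetical match table and address): locate the node of a
 * known-compatible device sitting at a given physical address, for instance
 * when an address is handed over by firmware:
 *
 *	static const struct of_device_id uart_matches[] = {
 *		{ .compatible = "ns16550a" },
 *		{ }
 *	};
 *
 *	struct device_node *np =
 *		of_find_matching_node_by_address(NULL, uart_matches,
 *						 0x10000000);
 *
 * The returned node, if any, has its refcount held; release it with
 * of_node_put() when done.
 */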
/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);

/**
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name of the resource
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);

/**
 * of_dma_get_range - Get DMA range info
 * @np:		device node to get DMA range info
 * @dma_addr:	pointer to store initial DMA address of DMA range
 * @paddr:	pointer to store initial CPU address of DMA range
 * @size:	pointer to store size of DMA range
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it.
 * dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * It returns -ENODEV if the "dma-ranges" property was not found
 * for this device in the DT.
 */
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	int len, naddr, nsize, pna;
	int ret = 0;
	u64 dmaaddr;

	if (!node)
		return -EINVAL;

	while (1) {
		naddr = of_n_addr_cells(node);
		nsize = of_n_size_cells(node);
		node = of_get_next_parent(node);
		if (!node)
			break;

		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/*
		 * At least an empty "dma-ranges" has to be defined for the
		 * parent node if DMA is supported
		 */
		if (!ranges)
			break;
	}

	if (!ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(node);

	/* dma-ranges format:
	 *	DMA addr	: naddr cells
	 *	CPU addr	: pna cells
	 *	size		: nsize cells
	 */
	dmaaddr = of_read_number(ranges, naddr);
	*paddr = of_translate_dma_address(np, ranges);
	if (*paddr == OF_BAD_ADDR) {
		pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
		       (unsigned long long)dmaaddr, np);
		ret = -EINVAL;
		goto out;
	}
	*dma_addr = dmaaddr;

	*size = of_read_number(ranges + naddr + pna, nsize);

	pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
		 *dma_addr, *paddr, *size);

out:
	of_node_put(node);

	return ret;
}
EXPORT_SYMBOL_GPL(of_dma_get_range);

/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * It returns true if the "dma-coherent" property was found
 * for this device in the DT.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node = of_node_get(np);

	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_parent(node);
	}
	of_node_put(node);
	return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
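/*
 * Usage sketch (illustrative; the dma_pfn_offset field below assumes a
 * kernel of this vintage where struct device still carries it): bus code
 * such as of_dma_configure() uses the two helpers above roughly like this
 * to set up a device's DMA offset and coherency:
 *
 *	u64 dma_addr, paddr, size;
 *
 *	if (!of_dma_get_range(np, &dma_addr, &paddr, &size))
 *		dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr);
 *	if (of_dma_is_coherent(np))
 *		... mark the device as DMA-coherent ...
 */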