#define pr_fmt(fmt)	"OF: " fmt

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)

static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r);

/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif

/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	unsigned int	(*get_flags)(const __be32 *addr);
};

/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}

static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
			      int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);
	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}

#ifdef CONFIG_OF_ADDRESS_PCI
/*
 * PCI bus specific translator
 */

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 */
	return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
		!strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}

static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	switch((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}
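
/*
 * Illustrative note (not part of the original source): the first PCI
 * address cell decoded above follows the standard OF PCI binding layout
 * "npt000ss bbbbbbbb dddddfff rrrrrrrr".  A hypothetical entry such as
 *
 *	assigned-addresses = <0x82000010 0x0 0xe0000000  0x0 0x100000>;
 *
 * has phys.hi = 0x82000010: ss = 0b10 selects 32-bit memory space, so
 * of_bus_pci_get_flags() returns IORESOURCE_MEM; bit 30 (prefetchable)
 * is clear; and the low byte 0x10 names the BAR at config offset
 * PCI_BASE_ADDRESS_0.  of_bus_pci_map() below deliberately skips this
 * cell and compares only the 64-bit address held in the other two cells.
 */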

static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}
#endif /* CONFIG_OF_ADDRESS_PCI */

#ifdef CONFIG_PCI
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
				 unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci")) {
		of_node_put(parent);
		return NULL;
	}
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);
		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_pci_address);

int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;

	addrp = of_get_pci_address(dev, bar, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;
	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, "ranges", &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
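
/*
 * Usage sketch (illustrative only): a host bridge driver normally pairs
 * of_pci_range_parser_init() with the for_each_of_pci_range() helper from
 * <linux/of_address.h>, which repeatedly calls of_pci_range_parser_one()
 * below, e.g.:
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -ENOENT;
 *	for_each_of_pci_range(&parser, &range) {
 *		struct resource res;
 *
 *		if (of_pci_range_to_resource(&range, np, &res))
 *			continue;
 *		... use res as one host bridge window ...
 *	}
 */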

struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	const int na = 3, ns = 2;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + parser->np > parser->end)
		return NULL;

	range->pci_space = parser->range[0];
	range->flags = of_bus_pci_get_flags(parser->range);
	range->pci_addr = of_read_number(parser->range + 1, ns);
	range->cpu_addr = of_translate_address(parser->node,
					       parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += parser->np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + parser->np <= parser->end) {
		u32 flags, pci_space;
		u64 pci_addr, cpu_addr, size;

		pci_space = be32_to_cpup(parser->range);
		flags = of_bus_pci_get_flags(parser->range);
		pci_addr = of_read_number(parser->range + 1, ns);
		cpu_addr = of_translate_address(parser->node,
						parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (pci_addr != range->pci_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += parser->np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);

/**
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node where the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio() which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails we know that pci_address_to_pio() will do too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;
	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;
		err = pci_register_io_range(range->cpu_addr, range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
#endif /* CONFIG_PCI */
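
/*
 * Illustrative note (not part of the original source): the ISA translator
 * below expects two address cells and one size cell.  Bit 0 of the first
 * address cell selects the space (1 = I/O, 0 = memory) and the second cell
 * is the offset within that space.  A hypothetical serial port could
 * therefore use:
 *
 *	reg = <1 0x3f8 8>;	// 8 bytes of I/O space at port 0x3f8
 *
 * for which of_bus_isa_get_flags() returns IORESOURCE_IO.
 */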

/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return !strcmp(np->name, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}

static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}

/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_OF_ADDRESS_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_OF_ADDRESS_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};

static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}

static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}
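
/*
 * Worked example (illustrative only) of the single translation step done
 * by of_translate_one() below, using the default bus handlers.  Assume a
 * child "reg" address of 0x4000 under a parent whose node carries:
 *
 *	ranges = <0x0  0xe0000000  0x100000>;
 *
 * of_bus_default_map() computes offset = 0x4000 - 0x0 = 0x4000 (the child
 * address lies inside the 0x100000-byte window), the parent address
 * 0xe0000000 is then copied over the child address, and
 * of_bus_default_translate() adds the offset, yielding 0xe0004000 in the
 * parent's address space.  __of_translate_address() repeats this step at
 * every level until the root node is reached.
 */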

static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	unsigned int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, an absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current one cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property, which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found!\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", (unsigned long long)offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}

/*
 * Translate an address from the device-tree into a CPU physical address;
 * this walks up the tree and applies the various bus mappings on the
 * way.
 *
 * Note: We consider that crossing any level with #size-cells == 0 means
 * that translation is impossible (i.e. we are not dealing with a value
 * that can be mapped to a CPU physical address). This is not really
 * specified that way, but it is traditionally the way IBM at least does
 * things.
 */
static u64 __of_translate_address(struct device_node *dev,
				  const __be32 *in_addr, const char *rprop)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %s **\n", of_node_full_name(dev));

	/* Increase refcount at current level */
	of_node_get(dev);

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %s\n", of_node_full_name(dev));
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %s\n",
		 bus->name, na, ns, of_node_full_name(parent));
	of_dump_addr("translating address:", addr, na);

	/* Translate */
	for (;;) {
		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = of_get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %s\n",
			       of_node_full_name(dev));
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %s\n",
			 pbus->name, pna, pns, of_node_full_name(parent));

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}

u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	return __of_translate_address(dev, in_addr, "ranges");
}
EXPORT_SYMBOL(of_translate_address);

u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	return __of_translate_address(dev, in_addr, "dma-ranges");
}
EXPORT_SYMBOL(of_translate_dma_address);

const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
			     unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
		if (i == index) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	return NULL;
}
EXPORT_SYMBOL(of_get_address);
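
/*
 * Usage sketch (illustrative only): of_get_address() returns a pointer
 * into the raw "reg" (or "assigned-addresses") property; callers that
 * need a CPU physical address normally pass it on to
 * of_translate_address(), e.g.:
 *
 *	u64 size, paddr;
 *	unsigned int flags;
 *	const __be32 *reg = of_get_address(np, 0, &size, &flags);
 *
 *	if (reg)
 *		paddr = of_translate_address(np, reg);
 *
 * which is exactly what __of_address_to_resource() below does before
 * building a struct resource.
 */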

static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
{
	u64 taddr;

	if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
		return -EINVAL;
	taddr = of_translate_address(dev, addrp);
	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));
	if (flags & IORESOURCE_IO) {
		unsigned long port;
		port = pci_address_to_pio(taddr);
		if (port == (unsigned long)-1)
			return -EINVAL;
		r->start = port;
		r->end = port + size - 1;
	} else {
		r->start = taddr;
		r->end = taddr + size - 1;
	}
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}

/**
 * of_address_to_resource - Translate device tree address and return as resource
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(); that happens when this is called too early or when
 * the address can't be matched to any host bridge IO space.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	const __be32 *addrp;
	u64 size;
	unsigned int flags;
	const char *name = NULL;

	addrp = of_get_address(dev, index, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	of_property_read_string_index(dev, "reg-names", index, &name);

	return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);

struct device_node *of_find_matching_node_by_address(struct device_node *from,
					const struct of_device_id *matches,
					u64 base_address)
{
	struct device_node *dn = of_find_matching_node(from, matches);
	struct resource res;

	while (dn) {
		if (!of_address_to_resource(dn, 0, &res) &&
		    res.start == base_address)
			return dn;

		dn = of_find_matching_node(dn, matches);
	}

	return NULL;
}

/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory, or NULL on failure.
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);

/**
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name of the resource
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);
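
/*
 * Usage sketch (illustrative only): because of_address_to_resource()
 * honours the optional "reg-names" property, a driver can look up a
 * region by name rather than by fixed index.  For a hypothetical node
 * with
 *
 *	reg = <0x10000000 0x1000>, <0x10010000 0x100>;
 *	reg-names = "ctrl", "phy";
 *
 * the "phy" window could be fetched with:
 *
 *	struct resource res;
 *	int index = of_property_match_string(np, "reg-names", "phy");
 *
 *	if (index < 0 || of_address_to_resource(np, index, &res))
 *		return -ENODEV;
 */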

/**
 * of_dma_get_range - Get DMA range info
 * @np:		device node to get DMA range info
 * @dma_addr:	pointer to store initial DMA address of DMA range
 * @paddr:	pointer to store initial CPU address of DMA range
 * @size:	pointer to store size of DMA range
 *
 * Walk up the tree looking for the first "dma-ranges" property and parse it.
 * dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * Returns -ENODEV if no "dma-ranges" property is found for this device in
 * the DT.
 */
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	int len, naddr, nsize, pna;
	int ret = 0;
	u64 dmaaddr;

	if (!node)
		return -EINVAL;

	while (1) {
		naddr = of_n_addr_cells(node);
		nsize = of_n_size_cells(node);
		node = of_get_next_parent(node);
		if (!node)
			break;

		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/*
		 * At least an empty "dma-ranges" property must be present on
		 * a parent node if DMA is supported.
		 */
		if (!ranges)
			break;
	}

	if (!ranges) {
		pr_debug("no dma-ranges found for node(%s)\n", np->full_name);
		ret = -ENODEV;
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(node);

	/* dma-ranges format:
	 *	DMA addr	: naddr cells
	 *	CPU addr	: pna cells
	 *	size		: nsize cells
	 */
	dmaaddr = of_read_number(ranges, naddr);
	*paddr = of_translate_dma_address(np, ranges);
	if (*paddr == OF_BAD_ADDR) {
		pr_err("translation of DMA address(%llx) to CPU address failed node(%s)\n",
		       dmaaddr, np->full_name);
		ret = -EINVAL;
		goto out;
	}
	*dma_addr = dmaaddr;

	*size = of_read_number(ranges + naddr + pna, nsize);

	pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
		 *dma_addr, *paddr, *size);

out:
	of_node_put(node);

	return ret;
}
EXPORT_SYMBOL_GPL(of_dma_get_range);

/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * Returns true if a "dma-coherent" property is found for this device
 * (or any of its parents) in the DT.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node = of_node_get(np);

	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_parent(node);
	}
	of_node_put(node);
	return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
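
/*
 * Usage sketch (illustrative only): bus code that configures DMA for a
 * device typically combines the two helpers above, roughly:
 *
 *	u64 dma_addr, paddr, size;
 *	bool coherent = of_dma_is_coherent(np);
 *
 *	if (of_dma_get_range(np, &dma_addr, &paddr, &size) == 0) {
 *		... apply the dma_addr/paddr offset and size limit ...
 *	}
 *
 * which mirrors what callers such as of_dma_configure() do when setting
 * up platform devices.
 */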