1 2 #define pr_fmt(fmt) "OF: " fmt 3 4 #include <linux/device.h> 5 #include <linux/io.h> 6 #include <linux/ioport.h> 7 #include <linux/module.h> 8 #include <linux/of_address.h> 9 #include <linux/pci.h> 10 #include <linux/pci_regs.h> 11 #include <linux/sizes.h> 12 #include <linux/slab.h> 13 #include <linux/string.h> 14 15 /* Max address size we deal with */ 16 #define OF_MAX_ADDR_CELLS 4 17 #define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS) 18 #define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0) 19 20 static struct of_bus *of_match_bus(struct device_node *np); 21 static int __of_address_to_resource(struct device_node *dev, 22 const __be32 *addrp, u64 size, unsigned int flags, 23 const char *name, struct resource *r); 24 25 /* Debug utility */ 26 #ifdef DEBUG 27 static void of_dump_addr(const char *s, const __be32 *addr, int na) 28 { 29 pr_debug("%s", s); 30 while (na--) 31 pr_cont(" %08x", be32_to_cpu(*(addr++))); 32 pr_cont("\n"); 33 } 34 #else 35 static void of_dump_addr(const char *s, const __be32 *addr, int na) { } 36 #endif 37 38 /* Callbacks for bus specific translators */ 39 struct of_bus { 40 const char *name; 41 const char *addresses; 42 int (*match)(struct device_node *parent); 43 void (*count_cells)(struct device_node *child, 44 int *addrc, int *sizec); 45 u64 (*map)(__be32 *addr, const __be32 *range, 46 int na, int ns, int pna); 47 int (*translate)(__be32 *addr, u64 offset, int na); 48 unsigned int (*get_flags)(const __be32 *addr); 49 }; 50 51 /* 52 * Default translator (generic bus) 53 */ 54 55 static void of_bus_default_count_cells(struct device_node *dev, 56 int *addrc, int *sizec) 57 { 58 if (addrc) 59 *addrc = of_n_addr_cells(dev); 60 if (sizec) 61 *sizec = of_n_size_cells(dev); 62 } 63 64 static u64 of_bus_default_map(__be32 *addr, const __be32 *range, 65 int na, int ns, int pna) 66 { 67 u64 cp, s, da; 68 69 cp = of_read_number(range, na); 70 s = of_read_number(range + na + pna, ns); 71 da = of_read_number(addr, na); 72 73 pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", 74 (unsigned long long)cp, (unsigned long long)s, 75 (unsigned long long)da); 76 77 if (da < cp || da >= (cp + s)) 78 return OF_BAD_ADDR; 79 return da - cp; 80 } 81 82 static int of_bus_default_translate(__be32 *addr, u64 offset, int na) 83 { 84 u64 a = of_read_number(addr, na); 85 memset(addr, 0, na * 4); 86 a += offset; 87 if (na > 1) 88 addr[na - 2] = cpu_to_be32(a >> 32); 89 addr[na - 1] = cpu_to_be32(a & 0xffffffffu); 90 91 return 0; 92 } 93 94 static unsigned int of_bus_default_get_flags(const __be32 *addr) 95 { 96 return IORESOURCE_MEM; 97 } 98 99 #ifdef CONFIG_OF_ADDRESS_PCI 100 /* 101 * PCI bus specific translator 102 */ 103 104 static int of_bus_pci_match(struct device_node *np) 105 { 106 /* 107 * "pciex" is PCI Express 108 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs 109 * "ht" is hypertransport 110 */ 111 return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") || 112 !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); 113 } 114 115 static void of_bus_pci_count_cells(struct device_node *np, 116 int *addrc, int *sizec) 117 { 118 if (addrc) 119 *addrc = 3; 120 if (sizec) 121 *sizec = 2; 122 } 123 124 static unsigned int of_bus_pci_get_flags(const __be32 *addr) 125 { 126 unsigned int flags = 0; 127 u32 w = be32_to_cpup(addr); 128 129 switch((w >> 24) & 0x03) { 130 case 0x01: 131 flags |= IORESOURCE_IO; 132 break; 133 case 0x02: /* 32 bits */ 134 case 0x03: /* 64 bits */ 135 flags |= IORESOURCE_MEM; 136 break; 137 } 138 if (w & 0x40000000) 139 flags |= IORESOURCE_PREFETCH; 140 return flags; 141 } 142 143 static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, 144 int pna) 145 { 146 u64 cp, s, da; 147 unsigned int af, rf; 148 149 af = of_bus_pci_get_flags(addr); 150 rf = of_bus_pci_get_flags(range); 151 152 /* Check address type match */ 153 if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO)) 154 return OF_BAD_ADDR; 155 156 /* Read address values, skipping high cell */ 157 cp = of_read_number(range + 1, na - 1); 158 s = of_read_number(range + na + pna, ns); 159 da = of_read_number(addr + 1, na - 1); 160 161 pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", 162 (unsigned long long)cp, (unsigned long long)s, 163 (unsigned long long)da); 164 165 if (da < cp || da >= (cp + s)) 166 return OF_BAD_ADDR; 167 return da - cp; 168 } 169 170 static int of_bus_pci_translate(__be32 *addr, u64 offset, int na) 171 { 172 return of_bus_default_translate(addr + 1, offset, na - 1); 173 } 174 #endif /* CONFIG_OF_ADDRESS_PCI */ 175 176 #ifdef CONFIG_PCI 177 const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, 178 unsigned int *flags) 179 { 180 const __be32 *prop; 181 unsigned int psize; 182 struct device_node *parent; 183 struct of_bus *bus; 184 int onesize, i, na, ns; 185 186 /* Get parent & match bus type */ 187 parent = of_get_parent(dev); 188 if (parent == NULL) 189 return NULL; 190 bus = of_match_bus(parent); 191 if (strcmp(bus->name, "pci")) { 192 of_node_put(parent); 193 return NULL; 194 } 195 bus->count_cells(dev, &na, &ns); 196 of_node_put(parent); 197 if (!OF_CHECK_ADDR_COUNT(na)) 198 return NULL; 199 200 /* Get "reg" or "assigned-addresses" property */ 201 prop = of_get_property(dev, bus->addresses, &psize); 202 if (prop == NULL) 203 return NULL; 204 psize /= 4; 205 206 onesize = na + ns; 207 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) { 208 u32 val = be32_to_cpu(prop[0]); 209 if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { 210 if (size) 211 *size = of_read_number(prop + na, ns); 212 if (flags) 213 *flags = bus->get_flags(prop); 214 return prop; 215 } 216 } 217 return NULL; 218 } 219 EXPORT_SYMBOL(of_get_pci_address); 220 221 int of_pci_address_to_resource(struct device_node *dev, int bar, 222 struct resource *r) 223 { 224 const __be32 *addrp; 225 u64 size; 226 unsigned int flags; 227 228 addrp = of_get_pci_address(dev, bar, &size, &flags); 229 if (addrp == NULL) 230 return -EINVAL; 231 return __of_address_to_resource(dev, addrp, size, flags, NULL, r); 232 } 233 EXPORT_SYMBOL_GPL(of_pci_address_to_resource); 234 235 int of_pci_range_parser_init(struct of_pci_range_parser *parser, 236 struct device_node *node) 237 { 238 const int na = 3, ns = 2; 239 int rlen; 240 241 parser->node = node; 242 parser->pna = of_n_addr_cells(node); 243 parser->np = parser->pna + na + ns; 244 245 parser->range = of_get_property(node, "ranges", &rlen); 246 if (parser->range == NULL) 247 return -ENOENT; 248 249 parser->end = parser->range + rlen / sizeof(__be32); 250 251 return 0; 252 } 253 EXPORT_SYMBOL_GPL(of_pci_range_parser_init); 254 255 struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, 256 struct of_pci_range *range) 257 { 258 const int na = 3, ns = 2; 259 260 if (!range) 261 return NULL; 262 263 if (!parser->range || parser->range + parser->np > parser->end) 264 return NULL; 265 266 range->pci_space = be32_to_cpup(parser->range); 267 range->flags = of_bus_pci_get_flags(parser->range); 268 range->pci_addr = of_read_number(parser->range + 1, ns); 269 range->cpu_addr = of_translate_address(parser->node, 270 parser->range + na); 271 range->size = of_read_number(parser->range + parser->pna + na, ns); 272 273 parser->range += parser->np; 274 275 /* Now consume following elements while they are contiguous */ 276 while (parser->range + parser->np <= parser->end) { 277 u32 flags; 278 u64 pci_addr, cpu_addr, size; 279 280 flags = of_bus_pci_get_flags(parser->range); 281 pci_addr = of_read_number(parser->range + 1, ns); 282 cpu_addr = of_translate_address(parser->node, 283 parser->range + na); 284 size = of_read_number(parser->range + parser->pna + na, ns); 285 286 if (flags != range->flags) 287 break; 288 if (pci_addr != range->pci_addr + range->size || 289 cpu_addr != range->cpu_addr + range->size) 290 break; 291 292 range->size += size; 293 parser->range += parser->np; 294 } 295 296 return range; 297 } 298 EXPORT_SYMBOL_GPL(of_pci_range_parser_one); 299 300 /* 301 * of_pci_range_to_resource - Create a resource from an of_pci_range 302 * @range: the PCI range that describes the resource 303 * @np: device node where the range belongs to 304 * @res: pointer to a valid resource that will be updated to 305 * reflect the values contained in the range. 306 * 307 * Returns EINVAL if the range cannot be converted to resource. 308 * 309 * Note that if the range is an IO range, the resource will be converted 310 * using pci_address_to_pio() which can fail if it is called too early or 311 * if the range cannot be matched to any host bridge IO space (our case here). 312 * To guard against that we try to register the IO range first. 313 * If that fails we know that pci_address_to_pio() will do too. 314 */ 315 int of_pci_range_to_resource(struct of_pci_range *range, 316 struct device_node *np, struct resource *res) 317 { 318 int err; 319 res->flags = range->flags; 320 res->parent = res->child = res->sibling = NULL; 321 res->name = np->full_name; 322 323 if (res->flags & IORESOURCE_IO) { 324 unsigned long port; 325 err = pci_register_io_range(range->cpu_addr, range->size); 326 if (err) 327 goto invalid_range; 328 port = pci_address_to_pio(range->cpu_addr); 329 if (port == (unsigned long)-1) { 330 err = -EINVAL; 331 goto invalid_range; 332 } 333 res->start = port; 334 } else { 335 if ((sizeof(resource_size_t) < 8) && 336 upper_32_bits(range->cpu_addr)) { 337 err = -EINVAL; 338 goto invalid_range; 339 } 340 341 res->start = range->cpu_addr; 342 } 343 res->end = res->start + range->size - 1; 344 return 0; 345 346 invalid_range: 347 res->start = (resource_size_t)OF_BAD_ADDR; 348 res->end = (resource_size_t)OF_BAD_ADDR; 349 return err; 350 } 351 #endif /* CONFIG_PCI */ 352 353 /* 354 * ISA bus specific translator 355 */ 356 357 static int of_bus_isa_match(struct device_node *np) 358 { 359 return !strcmp(np->name, "isa"); 360 } 361 362 static void of_bus_isa_count_cells(struct device_node *child, 363 int *addrc, int *sizec) 364 { 365 if (addrc) 366 *addrc = 2; 367 if (sizec) 368 *sizec = 1; 369 } 370 371 static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns, 372 int pna) 373 { 374 u64 cp, s, da; 375 376 /* Check address type match */ 377 if ((addr[0] ^ range[0]) & cpu_to_be32(1)) 378 return OF_BAD_ADDR; 379 380 /* Read address values, skipping high cell */ 381 cp = of_read_number(range + 1, na - 1); 382 s = of_read_number(range + na + pna, ns); 383 da = of_read_number(addr + 1, na - 1); 384 385 pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", 386 (unsigned long long)cp, (unsigned long long)s, 387 (unsigned long long)da); 388 389 if (da < cp || da >= (cp + s)) 390 return OF_BAD_ADDR; 391 return da - cp; 392 } 393 394 static int of_bus_isa_translate(__be32 *addr, u64 offset, int na) 395 { 396 return of_bus_default_translate(addr + 1, offset, na - 1); 397 } 398 399 static unsigned int of_bus_isa_get_flags(const __be32 *addr) 400 { 401 unsigned int flags = 0; 402 u32 w = be32_to_cpup(addr); 403 404 if (w & 1) 405 flags |= IORESOURCE_IO; 406 else 407 flags |= IORESOURCE_MEM; 408 return flags; 409 } 410 411 /* 412 * Array of bus specific translators 413 */ 414 415 static struct of_bus of_busses[] = { 416 #ifdef CONFIG_OF_ADDRESS_PCI 417 /* PCI */ 418 { 419 .name = "pci", 420 .addresses = "assigned-addresses", 421 .match = of_bus_pci_match, 422 .count_cells = of_bus_pci_count_cells, 423 .map = of_bus_pci_map, 424 .translate = of_bus_pci_translate, 425 .get_flags = of_bus_pci_get_flags, 426 }, 427 #endif /* CONFIG_OF_ADDRESS_PCI */ 428 /* ISA */ 429 { 430 .name = "isa", 431 .addresses = "reg", 432 .match = of_bus_isa_match, 433 .count_cells = of_bus_isa_count_cells, 434 .map = of_bus_isa_map, 435 .translate = of_bus_isa_translate, 436 .get_flags = of_bus_isa_get_flags, 437 }, 438 /* Default */ 439 { 440 .name = "default", 441 .addresses = "reg", 442 .match = NULL, 443 .count_cells = of_bus_default_count_cells, 444 .map = of_bus_default_map, 445 .translate = of_bus_default_translate, 446 .get_flags = of_bus_default_get_flags, 447 }, 448 }; 449 450 static struct of_bus *of_match_bus(struct device_node *np) 451 { 452 int i; 453 454 for (i = 0; i < ARRAY_SIZE(of_busses); i++) 455 if (!of_busses[i].match || of_busses[i].match(np)) 456 return &of_busses[i]; 457 BUG(); 458 return NULL; 459 } 460 461 static int of_empty_ranges_quirk(struct device_node *np) 462 { 463 if (IS_ENABLED(CONFIG_PPC)) { 464 /* To save cycles, we cache the result for global "Mac" setting */ 465 static int quirk_state = -1; 466 467 /* PA-SEMI sdc DT bug */ 468 if (of_device_is_compatible(np, "1682m-sdc")) 469 return true; 470 471 /* Make quirk cached */ 472 if (quirk_state < 0) 473 quirk_state = 474 of_machine_is_compatible("Power Macintosh") || 475 of_machine_is_compatible("MacRISC"); 476 return quirk_state; 477 } 478 return false; 479 } 480 481 static int of_translate_one(struct device_node *parent, struct of_bus *bus, 482 struct of_bus *pbus, __be32 *addr, 483 int na, int ns, int pna, const char *rprop) 484 { 485 const __be32 *ranges; 486 unsigned int rlen; 487 int rone; 488 u64 offset = OF_BAD_ADDR; 489 490 /* 491 * Normally, an absence of a "ranges" property means we are 492 * crossing a non-translatable boundary, and thus the addresses 493 * below the current cannot be converted to CPU physical ones. 494 * Unfortunately, while this is very clear in the spec, it's not 495 * what Apple understood, and they do have things like /uni-n or 496 * /ht nodes with no "ranges" property and a lot of perfectly 497 * useable mapped devices below them. Thus we treat the absence of 498 * "ranges" as equivalent to an empty "ranges" property which means 499 * a 1:1 translation at that level. It's up to the caller not to try 500 * to translate addresses that aren't supposed to be translated in 501 * the first place. --BenH. 502 * 503 * As far as we know, this damage only exists on Apple machines, so 504 * This code is only enabled on powerpc. --gcl 505 */ 506 ranges = of_get_property(parent, rprop, &rlen); 507 if (ranges == NULL && !of_empty_ranges_quirk(parent)) { 508 pr_debug("no ranges; cannot translate\n"); 509 return 1; 510 } 511 if (ranges == NULL || rlen == 0) { 512 offset = of_read_number(addr, na); 513 memset(addr, 0, pna * 4); 514 pr_debug("empty ranges; 1:1 translation\n"); 515 goto finish; 516 } 517 518 pr_debug("walking ranges...\n"); 519 520 /* Now walk through the ranges */ 521 rlen /= 4; 522 rone = na + pna + ns; 523 for (; rlen >= rone; rlen -= rone, ranges += rone) { 524 offset = bus->map(addr, ranges, na, ns, pna); 525 if (offset != OF_BAD_ADDR) 526 break; 527 } 528 if (offset == OF_BAD_ADDR) { 529 pr_debug("not found !\n"); 530 return 1; 531 } 532 memcpy(addr, ranges + na, 4 * pna); 533 534 finish: 535 of_dump_addr("parent translation for:", addr, pna); 536 pr_debug("with offset: %llx\n", (unsigned long long)offset); 537 538 /* Translate it into parent bus space */ 539 return pbus->translate(addr, offset, pna); 540 } 541 542 /* 543 * Translate an address from the device-tree into a CPU physical address, 544 * this walks up the tree and applies the various bus mappings on the 545 * way. 546 * 547 * Note: We consider that crossing any level with #size-cells == 0 to mean 548 * that translation is impossible (that is we are not dealing with a value 549 * that can be mapped to a cpu physical address). This is not really specified 550 * that way, but this is traditionally the way IBM at least do things 551 */ 552 static u64 __of_translate_address(struct device_node *dev, 553 const __be32 *in_addr, const char *rprop) 554 { 555 struct device_node *parent = NULL; 556 struct of_bus *bus, *pbus; 557 __be32 addr[OF_MAX_ADDR_CELLS]; 558 int na, ns, pna, pns; 559 u64 result = OF_BAD_ADDR; 560 561 pr_debug("** translation for device %pOF **\n", dev); 562 563 /* Increase refcount at current level */ 564 of_node_get(dev); 565 566 /* Get parent & match bus type */ 567 parent = of_get_parent(dev); 568 if (parent == NULL) 569 goto bail; 570 bus = of_match_bus(parent); 571 572 /* Count address cells & copy address locally */ 573 bus->count_cells(dev, &na, &ns); 574 if (!OF_CHECK_COUNTS(na, ns)) { 575 pr_debug("Bad cell count for %pOF\n", dev); 576 goto bail; 577 } 578 memcpy(addr, in_addr, na * 4); 579 580 pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n", 581 bus->name, na, ns, parent); 582 of_dump_addr("translating address:", addr, na); 583 584 /* Translate */ 585 for (;;) { 586 /* Switch to parent bus */ 587 of_node_put(dev); 588 dev = parent; 589 parent = of_get_parent(dev); 590 591 /* If root, we have finished */ 592 if (parent == NULL) { 593 pr_debug("reached root node\n"); 594 result = of_read_number(addr, na); 595 break; 596 } 597 598 /* Get new parent bus and counts */ 599 pbus = of_match_bus(parent); 600 pbus->count_cells(dev, &pna, &pns); 601 if (!OF_CHECK_COUNTS(pna, pns)) { 602 pr_err("Bad cell count for %pOF\n", dev); 603 break; 604 } 605 606 pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n", 607 pbus->name, pna, pns, parent); 608 609 /* Apply bus translation */ 610 if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop)) 611 break; 612 613 /* Complete the move up one level */ 614 na = pna; 615 ns = pns; 616 bus = pbus; 617 618 of_dump_addr("one level translation:", addr, na); 619 } 620 bail: 621 of_node_put(parent); 622 of_node_put(dev); 623 624 return result; 625 } 626 627 u64 of_translate_address(struct device_node *dev, const __be32 *in_addr) 628 { 629 return __of_translate_address(dev, in_addr, "ranges"); 630 } 631 EXPORT_SYMBOL(of_translate_address); 632 633 u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr) 634 { 635 return __of_translate_address(dev, in_addr, "dma-ranges"); 636 } 637 EXPORT_SYMBOL(of_translate_dma_address); 638 639 const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, 640 unsigned int *flags) 641 { 642 const __be32 *prop; 643 unsigned int psize; 644 struct device_node *parent; 645 struct of_bus *bus; 646 int onesize, i, na, ns; 647 648 /* Get parent & match bus type */ 649 parent = of_get_parent(dev); 650 if (parent == NULL) 651 return NULL; 652 bus = of_match_bus(parent); 653 bus->count_cells(dev, &na, &ns); 654 of_node_put(parent); 655 if (!OF_CHECK_ADDR_COUNT(na)) 656 return NULL; 657 658 /* Get "reg" or "assigned-addresses" property */ 659 prop = of_get_property(dev, bus->addresses, &psize); 660 if (prop == NULL) 661 return NULL; 662 psize /= 4; 663 664 onesize = na + ns; 665 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) 666 if (i == index) { 667 if (size) 668 *size = of_read_number(prop + na, ns); 669 if (flags) 670 *flags = bus->get_flags(prop); 671 return prop; 672 } 673 return NULL; 674 } 675 EXPORT_SYMBOL(of_get_address); 676 677 static int __of_address_to_resource(struct device_node *dev, 678 const __be32 *addrp, u64 size, unsigned int flags, 679 const char *name, struct resource *r) 680 { 681 u64 taddr; 682 683 if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) 684 return -EINVAL; 685 taddr = of_translate_address(dev, addrp); 686 if (taddr == OF_BAD_ADDR) 687 return -EINVAL; 688 memset(r, 0, sizeof(struct resource)); 689 if (flags & IORESOURCE_IO) { 690 unsigned long port; 691 port = pci_address_to_pio(taddr); 692 if (port == (unsigned long)-1) 693 return -EINVAL; 694 r->start = port; 695 r->end = port + size - 1; 696 } else { 697 r->start = taddr; 698 r->end = taddr + size - 1; 699 } 700 r->flags = flags; 701 r->name = name ? name : dev->full_name; 702 703 return 0; 704 } 705 706 /** 707 * of_address_to_resource - Translate device tree address and return as resource 708 * 709 * Note that if your address is a PIO address, the conversion will fail if 710 * the physical address can't be internally converted to an IO token with 711 * pci_address_to_pio(), that is because it's either called too early or it 712 * can't be matched to any host bridge IO space 713 */ 714 int of_address_to_resource(struct device_node *dev, int index, 715 struct resource *r) 716 { 717 const __be32 *addrp; 718 u64 size; 719 unsigned int flags; 720 const char *name = NULL; 721 722 addrp = of_get_address(dev, index, &size, &flags); 723 if (addrp == NULL) 724 return -EINVAL; 725 726 /* Get optional "reg-names" property to add a name to a resource */ 727 of_property_read_string_index(dev, "reg-names", index, &name); 728 729 return __of_address_to_resource(dev, addrp, size, flags, name, r); 730 } 731 EXPORT_SYMBOL_GPL(of_address_to_resource); 732 733 struct device_node *of_find_matching_node_by_address(struct device_node *from, 734 const struct of_device_id *matches, 735 u64 base_address) 736 { 737 struct device_node *dn = of_find_matching_node(from, matches); 738 struct resource res; 739 740 while (dn) { 741 if (!of_address_to_resource(dn, 0, &res) && 742 res.start == base_address) 743 return dn; 744 745 dn = of_find_matching_node(dn, matches); 746 } 747 748 return NULL; 749 } 750 751 752 /** 753 * of_iomap - Maps the memory mapped IO for a given device_node 754 * @device: the device whose io range will be mapped 755 * @index: index of the io range 756 * 757 * Returns a pointer to the mapped memory 758 */ 759 void __iomem *of_iomap(struct device_node *np, int index) 760 { 761 struct resource res; 762 763 if (of_address_to_resource(np, index, &res)) 764 return NULL; 765 766 return ioremap(res.start, resource_size(&res)); 767 } 768 EXPORT_SYMBOL(of_iomap); 769 770 /* 771 * of_io_request_and_map - Requests a resource and maps the memory mapped IO 772 * for a given device_node 773 * @device: the device whose io range will be mapped 774 * @index: index of the io range 775 * @name: name of the resource 776 * 777 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded 778 * error code on failure. Usage example: 779 * 780 * base = of_io_request_and_map(node, 0, "foo"); 781 * if (IS_ERR(base)) 782 * return PTR_ERR(base); 783 */ 784 void __iomem *of_io_request_and_map(struct device_node *np, int index, 785 const char *name) 786 { 787 struct resource res; 788 void __iomem *mem; 789 790 if (of_address_to_resource(np, index, &res)) 791 return IOMEM_ERR_PTR(-EINVAL); 792 793 if (!request_mem_region(res.start, resource_size(&res), name)) 794 return IOMEM_ERR_PTR(-EBUSY); 795 796 mem = ioremap(res.start, resource_size(&res)); 797 if (!mem) { 798 release_mem_region(res.start, resource_size(&res)); 799 return IOMEM_ERR_PTR(-ENOMEM); 800 } 801 802 return mem; 803 } 804 EXPORT_SYMBOL(of_io_request_and_map); 805 806 /** 807 * of_dma_get_range - Get DMA range info 808 * @np: device node to get DMA range info 809 * @dma_addr: pointer to store initial DMA address of DMA range 810 * @paddr: pointer to store initial CPU address of DMA range 811 * @size: pointer to store size of DMA range 812 * 813 * Look in bottom up direction for the first "dma-ranges" property 814 * and parse it. 815 * dma-ranges format: 816 * DMA addr (dma_addr) : naddr cells 817 * CPU addr (phys_addr_t) : pna cells 818 * size : nsize cells 819 * 820 * It returns -ENODEV if "dma-ranges" property was not found 821 * for this device in DT. 822 */ 823 int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size) 824 { 825 struct device_node *node = of_node_get(np); 826 const __be32 *ranges = NULL; 827 int len, naddr, nsize, pna; 828 int ret = 0; 829 u64 dmaaddr; 830 831 if (!node) 832 return -EINVAL; 833 834 while (1) { 835 naddr = of_n_addr_cells(node); 836 nsize = of_n_size_cells(node); 837 node = of_get_next_parent(node); 838 if (!node) 839 break; 840 841 ranges = of_get_property(node, "dma-ranges", &len); 842 843 /* Ignore empty ranges, they imply no translation required */ 844 if (ranges && len > 0) 845 break; 846 847 /* 848 * At least empty ranges has to be defined for parent node if 849 * DMA is supported 850 */ 851 if (!ranges) 852 break; 853 } 854 855 if (!ranges) { 856 pr_debug("no dma-ranges found for node(%pOF)\n", np); 857 ret = -ENODEV; 858 goto out; 859 } 860 861 len /= sizeof(u32); 862 863 pna = of_n_addr_cells(node); 864 865 /* dma-ranges format: 866 * DMA addr : naddr cells 867 * CPU addr : pna cells 868 * size : nsize cells 869 */ 870 dmaaddr = of_read_number(ranges, naddr); 871 *paddr = of_translate_dma_address(np, ranges); 872 if (*paddr == OF_BAD_ADDR) { 873 pr_err("translation of DMA address(%pad) to CPU address failed node(%pOF)\n", 874 dma_addr, np); 875 ret = -EINVAL; 876 goto out; 877 } 878 *dma_addr = dmaaddr; 879 880 *size = of_read_number(ranges + naddr + pna, nsize); 881 882 pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", 883 *dma_addr, *paddr, *size); 884 885 out: 886 of_node_put(node); 887 888 return ret; 889 } 890 EXPORT_SYMBOL_GPL(of_dma_get_range); 891 892 /** 893 * of_dma_is_coherent - Check if device is coherent 894 * @np: device node 895 * 896 * It returns true if "dma-coherent" property was found 897 * for this device in DT. 898 */ 899 bool of_dma_is_coherent(struct device_node *np) 900 { 901 struct device_node *node = of_node_get(np); 902 903 while (node) { 904 if (of_property_read_bool(node, "dma-coherent")) { 905 of_node_put(node); 906 return true; 907 } 908 node = of_get_next_parent(node); 909 } 910 of_node_put(node); 911 return false; 912 } 913 EXPORT_SYMBOL_GPL(of_dma_is_coherent); 914