// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
				  resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot mem after the system is up. So for reusing the resource entry
 * we need to remember the resource.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);

static struct resource *next_resource(struct resource *p, bool sibling_only)
{
	/* Caller wants to traverse through siblings only */
	if (sibling_only)
		return p->sibling;

	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;
	return (void *)next_resource(p, false);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = PDE_DATA(file_inode(m->file));
	loff_t l = 0;

	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = PDE_DATA(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
		   depth * 2, "",
		   width, start,
		   width, end,
		   r->name ? r->name : "<BAD>");
	return 0;
}
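
/*
 * For illustration only: r_show() above produces /proc/iomem entries of
 * the following shape (the addresses are made up, not from a real system;
 * the ioport tree uses a four-digit field width, and readers without
 * CAP_SYS_ADMIN see zeroed ranges):
 *
 *	00100000-bfffffff : System RAM
 *	  bffe0000-bfffffff : Kernel data
 *	c0000000-c0000fff : example-device
 *
 * Each nesting level in the resource tree indents by two spaces.
 */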

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			     &ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	if (!res)
		return;

	if (!PageSlab(virt_to_head_page(res))) {
		spin_lock(&bootmem_resource_lock);
		res->sibling = bootmem_resource_free;
		bootmem_resource_free = res;
		spin_unlock(&bootmem_resource_lock);
	} else {
		kfree(res);
	}
}

static struct resource *alloc_resource(gfp_t flags)
{
	struct resource *res = NULL;

	spin_lock(&bootmem_resource_lock);
	if (bootmem_resource_free) {
		res = bootmem_resource_free;
		bootmem_resource_free = res->sibling;
	}
	spin_unlock(&bootmem_resource_lock);

	if (res)
		memset(res, 0, sizeof(struct resource));
	else
		res = kzalloc(sizeof(struct resource), flags);

	return res;
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);
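
/*
 * Usage sketch (illustrative only; the name and range below are made up
 * and not part of this file):
 *
 *	static struct resource example_region = {
 *		.name  = "example-device",
 *		.start = 0xfe000000,
 *		.end   = 0xfe000fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &example_region))
 *		return -EBUSY;		(range already claimed)
 *	...
 *	release_resource(&example_region);
 */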

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * This function walks the whole tree and not just first level children
 * unless @first_lvl is true.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @first_lvl:	walk only the first level children, if set
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       bool first_lvl, struct resource *res)
{
	bool siblings_only = true;
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for (p = iomem_resource.child; p; p = next_resource(p, siblings_only)) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		/*
		 * Now that we found a range that matches what we look for,
		 * check the flags and the descriptor. If we were not asked to
		 * use only the first level, start looking at children as well.
		 */
		siblings_only = first_lvl;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 bool first_lvl, void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * This walks through the whole tree and not just first level children.
 * All the memory ranges which overlap [@start..@end] and also match @flags
 * and @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
				     arg, func);
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
				     arg, func);
}
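
/*
 * Minimal sketch of a walk_system_ram_res() caller (illustrative only;
 * count_ram_bytes() is a hypothetical helper, not part of this file):
 *
 *	static int count_ram_bytes(struct resource *res, void *arg)
 *	{
 *		u64 *total = arg;
 *
 *		*total += resource_size(res);
 *		return 0;	(a non-zero return stops the walk)
 *	}
 *
 *	u64 total = 0;
 *	walk_system_ram_res(0, -1, &total, count_ram_bytes);
 */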

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 *
 * This will find System RAM ranges that are children of top-level resources
 * in addition to top-level System RAM resources.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
				    false, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	struct resource res;
	int type = 0; int other = 0;
	struct resource *p;

	res.start = start;
	res.end = start + size - 1;

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (resource_overlaps(p, &res))
			is_type ? type++ : other++;
	}
	read_unlock(&resource_lock);

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}
EXPORT_SYMBOL_GPL(region_intersects);
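
/*
 * Usage sketch (illustrative only; "addr" and "size" are assumed to
 * describe a candidate remapping range):
 *
 *	if (region_intersects(addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;		(refuse to remap System RAM)
 */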

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp->end below would cause an
	 * underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
							 size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
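
/*
 * Usage sketch (illustrative only; the size, bounds and alignment below
 * are made up):
 *
 *	static struct resource example_slot = {
 *		.name  = "example-window",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &example_slot,
 *				0x1000,			(size)
 *				0x80000000, 0xffffffff,	(min, max)
 *				0x1000,			(alignment)
 *				NULL, NULL);		(default alignf)
 */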

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, the conflicting resource if the resource can't be
 * inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
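
/*
 * To illustrate the difference from request_resource() with a made-up
 * tree: inserting [0x100-0x3ff] below succeeds and adopts the two
 * existing siblings as children, whereas request_resource() would have
 * returned the first conflict instead:
 *
 *	before:	root -> [0x100-0x1ff] -> [0x300-0x3ff]
 *	after:	root -> [0x100-0x3ff]
 *			-> children [0x100-0x1ff], [0x300-0x3ff]
 */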

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
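
/*
 * Usage sketch (illustrative only): grow a previously requested region in
 * place, keeping its start but doubling its size. This fails with -EBUSY
 * if a sibling or the parent's bounds are in the way:
 *
 *	if (adjust_resource(res, res->start, 2 * resource_size(res)))
 *		pr_warn("could not grow region %pR\n", res);
 */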

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */
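
/*
 * Typical driver-side pattern (illustrative only; the port range is made
 * up). request_region()/release_region() are the <linux/ioport.h> wrappers
 * built on __request_region()/__release_region() below:
 *
 *	if (!request_region(0x300, 8, "example-uart"))
 *		return -EBUSY;
 *	...
 *	release_region(0x300, 8);
 */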

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, so no iomem_get_mapping() user
	 * can have established a mapping yet.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = alloc_resource(GFP_KERNEL);
	struct resource *orig_parent = parent;

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		free_resource(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);

	if (res && orig_parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource <%016llx-%016llx>\n",
	       (unsigned long long)start, (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
	 * just before releasing the region. This is highly unlikely to
	 * fail - let's play safe and make it never fail as the caller cannot
	 * perform any error handling (e.g., trying to re-add memory will fail
	 * similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if this one does not fit */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		/*
		 * All memory regions added from memory-hotplug path have the
		 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
		 * this flag, we know that we are dealing with a resource coming
		 * from HMM/devm. HMM/devm use another mechanism to add/release
		 * a resource. This goes via devm_request_mem_region and
		 * devm_release_mem_region.
		 * HMM/devm take care to release their resources when they want,
		 * so if we are dealing with them, let us just back off here.
		 */
		if (!(res->flags & IORESOURCE_SYSRAM)) {
			break;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
1442 * 1443 * Note: 1444 * - The caller has to make sure that no pointers to resources that are 1445 * marked mergeable are used anymore after this call - the resource might 1446 * be freed and the pointer might be stale! 1447 * - release_mem_region_adjustable() will split on demand on memory hotunplug 1448 */ 1449 void merge_system_ram_resource(struct resource *res) 1450 { 1451 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 1452 struct resource *cur; 1453 1454 if (WARN_ON_ONCE((res->flags & flags) != flags)) 1455 return; 1456 1457 write_lock(&resource_lock); 1458 res->flags |= IORESOURCE_SYSRAM_MERGEABLE; 1459 1460 /* Try to merge with next item in the list. */ 1461 cur = res->sibling; 1462 if (cur && system_ram_resources_mergeable(res, cur)) { 1463 res->end = cur->end; 1464 res->sibling = cur->sibling; 1465 free_resource(cur); 1466 } 1467 1468 /* Try to merge with previous item in the list. */ 1469 cur = res->parent->child; 1470 while (cur && cur->sibling != res) 1471 cur = cur->sibling; 1472 if (cur && system_ram_resources_mergeable(cur, res)) { 1473 cur->end = res->end; 1474 cur->sibling = res->sibling; 1475 free_resource(res); 1476 } 1477 write_unlock(&resource_lock); 1478 } 1479 #endif /* CONFIG_MEMORY_HOTPLUG */ 1480 1481 /* 1482 * Managed region resource 1483 */ 1484 static void devm_resource_release(struct device *dev, void *ptr) 1485 { 1486 struct resource **r = ptr; 1487 1488 release_resource(*r); 1489 } 1490 1491 /** 1492 * devm_request_resource() - request and reserve an I/O or memory resource 1493 * @dev: device for which to request the resource 1494 * @root: root of the resource tree from which to request the resource 1495 * @new: descriptor of the resource to request 1496 * 1497 * This is a device-managed version of request_resource(). There is usually 1498 * no need to release resources requested by this function explicitly since 1499 * that will be taken care of when the device is unbound from its driver. 1500 * If for some reason the resource needs to be released explicitly, because 1501 * of ordering issues for example, drivers must call devm_release_resource() 1502 * rather than the regular release_resource(). 1503 * 1504 * When a conflict is detected between any existing resources and the newly 1505 * requested resource, an error message will be printed. 1506 * 1507 * Returns 0 on success or a negative error code on failure. 1508 */ 1509 int devm_request_resource(struct device *dev, struct resource *root, 1510 struct resource *new) 1511 { 1512 struct resource *conflict, **ptr; 1513 1514 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL); 1515 if (!ptr) 1516 return -ENOMEM; 1517 1518 *ptr = new; 1519 1520 conflict = request_resource_conflict(root, new); 1521 if (conflict) { 1522 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n", 1523 new, conflict->name, conflict); 1524 devres_free(ptr); 1525 return -EBUSY; 1526 } 1527 1528 devres_add(dev, ptr); 1529 return 0; 1530 } 1531 EXPORT_SYMBOL(devm_request_resource); 1532 1533 static int devm_resource_match(struct device *dev, void *res, void *data) 1534 { 1535 struct resource **ptr = res; 1536 1537 return *ptr == data; 1538 } 1539 1540 /** 1541 * devm_release_resource() - release a previously requested resource 1542 * @dev: device for which to release the resource 1543 * @new: descriptor of the resource to release 1544 * 1545 * Releases a resource previously requested using devm_request_resource(). 

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);
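
/*
 * For example (illustrative only), booting with:
 *
 *	reserve=0x300,8
 *
 * would mark I/O ports 0x300-0x307 busy (the start is below 0x10000, so
 * ioport_resource is chosen as the parent), keeping drivers off them.
 */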

/*
 * Check if the requested addr and size spans more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is reserved in the iomem resource tree.
 * Returns true if reserved, false if not reserved.
 */
bool iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	bool err = false;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return false;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if ((p->flags & IORESOURCE_BUSY) == 0)
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) ||
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
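
/*
 * Usage sketch (illustrative only; resource_list_add_tail() and
 * resource_list_for_each_entry() are assumed to come from
 * <linux/resource_ext.h>, and "example_region" is hypothetical):
 *
 *	LIST_HEAD(res_list);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	*entry->res = example_region;		(the embedded __res is used)
 *	resource_list_add_tail(entry, &res_list);
 *
 *	resource_list_for_each_entry(entry, &res_list)
 *		pr_info("%pR\n", entry->res);
 *
 *	resource_list_free(&res_list);
 */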

#ifdef CONFIG_DEVICE_PRIVATE
static struct resource *__request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size, const char *name)
{
	resource_size_t end, addr;
	struct resource *res;

	size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
	end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = end - size + 1UL;

	for (; addr > size && addr >= base->start; addr -= size) {
		if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
				REGION_DISJOINT)
			continue;

		if (dev)
			res = devm_request_mem_region(dev, addr, size, name);
		else
			res = request_mem_region(addr, size, name);
		if (!res)
			return ERR_PTR(-ENOMEM);
		res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
		return res;
	}

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address big enough to
 * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
 * memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	return __request_free_mem_region(dev, base, size, dev_name(dev));
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	return __request_free_mem_region(NULL, base, size, name);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

#endif /* CONFIG_DEVICE_PRIVATE */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish iomem revocation inode initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);