/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
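
/*
 * For illustration: r_show() emits one line per resource, indented two
 * spaces per nesting level (capped at MAX_IORES_LEVEL), so a read of
 * /proc/ioports might look like:
 *
 *	0000-0cf7 : PCI Bus 0000:00
 *	  0060-0060 : keyboard
 */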

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}
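
/*
 * For illustration: __request_resource() keeps each child list sorted by
 * start address and rejects any overlap. With children [0x1000, 0x1fff]
 * and [0x4000, 0x4fff] (hypothetical ranges), requesting [0x2000, 0x2fff]
 * links the new entry between them and returns NULL, while requesting
 * [0x1800, 0x27ff] returns the conflicting [0x1000, 0x1fff] entry.
 */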

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Find the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls @func for each memory range of "System RAM",
 * i.e. ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * For now, this function only handles "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif
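
/*
 * For illustration, a sketch of a walk_system_ram_range() caller;
 * count_ram_pages is a hypothetical callback, not defined elsewhere
 * in this file:
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long nr_ram = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &nr_ram, count_ram_pages);
 */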

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as "System RAM" in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
	return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			 struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.flags = new->flags;
	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp.end below would cause an
	 * underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail = *new;
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		if (avail.start >= tmp.start) {
			alloc.start = constraint->alignf(constraint->alignf_data,
					&avail, size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}
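
/*
 * For illustration, a worked example of the gap scan above (hypothetical
 * ranges): with root [0x0000, 0xffff] and busy children [0x0000, 0x0fff]
 * and [0x3000, 0x3fff], find_resource() for size 0x1000, align 0x1000
 * first skips the child sitting at root->start, then tries the gap
 * [0x1000, 0x2fff] and settles on [0x1000, 0x1fff].
 */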

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be
 *	accommodated at the current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
int reallocate_resource(struct resource *root, struct resource *old,
			resource_size_t newsize,
			struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
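
/*
 * For illustration, a sketch of an allocate_resource() caller
 * (my_res and the range below are hypothetical):
 *
 *	static struct resource my_res = {
 *		.name	= "my-dev",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &my_res, 0x1000,
 *				0x10000000, 0x1fffffff, 0x1000,
 *				NULL, NULL);
 *
 * This asks for a 4 KiB region, 4 KiB aligned, anywhere in
 * [0x10000000, 0x1fffffff]. A NULL @alignf falls back to
 * simple_align_resource(), which just returns the start of each
 * candidate gap.
 */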

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
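
/*
 * For illustration (hypothetical ranges): inserting [0x1000, 0x3fff] into
 * a tree that already holds [0x1000, 0x1fff] and [0x3000, 0x3fff]
 * succeeds, and both existing entries become children of the new
 * resource. A partial overlap such as [0x1800, 0x27ff] fails with -EBUSY.
 */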

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk(KERN_INFO "Expanded resource %s due to conflict with %s\n",
		       new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	conflict = __request_resource(parent, res);
	if (!conflict)
		return;

	/* failed, split and try again */
	kfree(res);

	/* conflict covered whole area */
	if (conflict->start <= start && conflict->end >= end)
		return;

	if (conflict->start > start)
		__reserve_region_with_split(root, start, conflict->start-1, name);
	if (conflict->end < end)
		__reserve_region_with_split(root, conflict->end+1, end, name);
}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}
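
/*
 * For illustration (hypothetical ranges): if [0x2000, 0x2fff] is already
 * taken, reserve_region_with_split(root, 0x1000, 0x3fff, "foo") recurses
 * around the conflict and ends up reserving the two fragments
 * [0x1000, 0x1fff] and [0x3000, 0x3fff].
 */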

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
		   resource_size_t n)
{
	struct resource *res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);
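
/*
 * For illustration, the classic driver pattern built on the wrappers from
 * <linux/ioport.h> (the port and size here are hypothetical):
 *
 *	if (!request_region(0x3f8, 8, "my_uart"))
 *		return -EBUSY;
 *	...
 *	release_region(0x3f8, 8);
 *
 * request_region() and release_region() expand to __request_region() and
 * __release_region() with &ioport_resource as @parent.
 */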

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
	       "<%016llx-%016llx>\n", (unsigned long long)start,
	       (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
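
/*
 * For illustration, a sketch of a managed request in a hypothetical
 * probe() function; the region is released automatically when the
 * device is unbound:
 *
 *	if (!devm_request_region(&pdev->dev, res->start,
 *				 resource_size(res), "my-driver"))
 *		return -EBUSY;
 *
 * devm_request_region() is the <linux/ioport.h> wrapper that passes
 * &ioport_resource to __devm_request_region().
 */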

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ?
					&iomem_resource : &ioport_resource,
					res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size span more than any single slot
 * in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l = 0;

	read_lock(&resource_lock);
	for (p = p->child; p; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusively reserved in the iomem resource tree.
 * Returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l = 0;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);
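
/*
 * For illustration: with CONFIG_STRICT_DEVMEM=y, booting with
 * "iomem=relaxed" on the kernel command line turns these exclusivity
 * checks off, and "iomem=strict" turns them back on. Regions can
 * likewise be reserved at boot via reserve_setup() above, e.g.
 * "reserve=0x300,8" (hypothetical values).
 */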