// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <asm/io.h>


struct resource ioport_resource = {
        .name   = "PCI IO",
        .start  = 0,
        .end    = IO_SPACE_LIMIT,
        .flags  = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
        .name   = "PCI mem",
        .start  = 0,
        .end    = -1,
        .flags  = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
        resource_size_t min, max, align;
        resource_size_t (*alignf)(void *, const struct resource *,
                                  resource_size_t, resource_size_t);
        void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p)
{
        if (p->child)
                return p->child;
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}

static struct resource *next_resource_skip_children(struct resource *p)
{
        while (!p->sibling && p->parent)
                p = p->parent;
        return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children)                     \
        for ((_p) = (_root)->child; (_p);                                \
             (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
                                       next_resource(_p))
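
/*
 * Example (illustrative only, not part of this file): a minimal walker
 * using for_each_resource() to dump every node under a root, assuming
 * the caller already holds resource_lock for reading. dump_tree() is a
 * hypothetical name.
 *
 *      static void dump_tree(struct resource *root)
 *      {
 *              struct resource *p;
 *
 *              for_each_resource(root, p, false)
 *                      pr_info("%pR\n", p);
 *      }
 */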

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct resource *p = v;
        (*pos)++;
        return (void *)next_resource(p);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
        __acquires(resource_lock)
{
        struct resource *p = pde_data(file_inode(m->file));
        loff_t l = 0;
        read_lock(&resource_lock);
        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
                ;
        return p;
}

static void r_stop(struct seq_file *m, void *v)
        __releases(resource_lock)
{
        read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
        struct resource *root = pde_data(file_inode(m->file));
        struct resource *r = v, *p;
        unsigned long long start, end;
        int width = root->end < 0x10000 ? 4 : 8;
        int depth;

        for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
                if (p->parent == root)
                        break;

        if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
                start = r->start;
                end = r->end;
        } else {
                start = end = 0;
        }

        seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
                   depth * 2, "",
                   width, start,
                   width, end,
                   r->name ? r->name : "<BAD>");
        return 0;
}

static const struct seq_operations resource_op = {
        .start  = r_start,
        .next   = r_next,
        .stop   = r_stop,
        .show   = r_show,
};

static int __init ioresources_init(void)
{
        proc_create_seq_data("ioports", 0, NULL, &resource_op,
                             &ioport_resource);
        proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
        return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */
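
/*
 * Example (typical output of the seq_file interface above, read via
 * /proc/iomem; the entries and "foo-ctrl" name are illustrative, and
 * addresses read back as zeros without CAP_SYS_ADMIN):
 *
 *      00100000-7fffffff : System RAM
 *        10000000-1000ffff : foo-ctrl
 */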

static void free_resource(struct resource *res)
{
        /*
         * If the resource was allocated using memblock early during boot
         * we'll leak it here: we can only return full pages back to the
         * buddy and trying to be smart and reusing them eventually in
         * alloc_resource() overcomplicates resource handling.
         */
        if (res && PageSlab(virt_to_head_page(res)))
                kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
        return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
        resource_size_t start = new->start;
        resource_size_t end = new->end;
        struct resource *tmp, **p;

        if (end < start)
                return root;
        if (start < root->start)
                return root;
        if (end > root->end)
                return root;
        p = &root->child;
        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > end) {
                        new->sibling = tmp;
                        *p = new;
                        new->parent = root;
                        return NULL;
                }
                p = &tmp->sibling;
                if (tmp->end < start)
                        continue;
                return tmp;
        }
}

static int __release_resource(struct resource *old, bool release_child)
{
        struct resource *tmp, **p, *chd;

        p = &old->parent->child;
        for (;;) {
                tmp = *p;
                if (!tmp)
                        break;
                if (tmp == old) {
                        if (release_child || !(tmp->child)) {
                                *p = tmp->sibling;
                        } else {
                                for (chd = tmp->child;; chd = chd->sibling) {
                                        chd->parent = tmp->parent;
                                        if (!(chd->sibling))
                                                break;
                                }
                                *p = tmp->child;
                                chd->sibling = tmp->sibling;
                        }
                        old->parent = NULL;
                        return 0;
                }
                p = &tmp->sibling;
        }
        return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
        struct resource *tmp, *p;
        resource_size_t size;

        p = r->child;
        r->child = NULL;
        while (p) {
                tmp = p;
                p = p->sibling;

                tmp->parent = NULL;
                tmp->sibling = NULL;
                __release_child_resources(tmp);

                printk(KERN_DEBUG "release child resource %pR\n", tmp);
                /* need to restore size, and keep flags */
                size = resource_size(tmp);
                tmp->start = 0;
                tmp->end = size - 1;
        }
}

void release_child_resources(struct resource *r)
{
        write_lock(&resource_lock);
        __release_child_resources(r);
        write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __request_resource(root, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
        struct resource *conflict;

        conflict = request_resource_conflict(root, new);
        return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
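
/*
 * Example (hypothetical driver code, names and addresses invented):
 * reserving a fixed MMIO window directly under iomem_resource. The
 * struct must stay allocated for as long as the reservation is held.
 *
 *      static struct resource foo_res = {
 *              .name   = "foo-ctrl",
 *              .start  = 0xfed00000,
 *              .end    = 0xfed00fff,
 *              .flags  = IORESOURCE_MEM,
 *      };
 *
 *      if (request_resource(&iomem_resource, &foo_res))
 *              return -EBUSY;
 */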

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old, true);
        write_unlock(&resource_lock);
        return retval;
}

EXPORT_SYMBOL(release_resource);

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *                       [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @res: return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
                               unsigned long flags, unsigned long desc,
                               struct resource *res)
{
        struct resource *p;

        if (!res)
                return -EINVAL;

        if (start >= end)
                return -EINVAL;

        read_lock(&resource_lock);

        for (p = iomem_resource.child; p; p = next_resource(p)) {
                /* If we passed the resource we are looking for, stop */
                if (p->start > end) {
                        p = NULL;
                        break;
                }

                /* Skip until we find a range that matches what we look for */
                if (p->end < start)
                        continue;

                if ((p->flags & flags) != flags)
                        continue;
                if ((desc != IORES_DESC_NONE) && (desc != p->desc))
                        continue;

                /* Found a match, break */
                break;
        }

        if (p) {
                /* copy data */
                *res = (struct resource) {
                        .start = max(start, p->start),
                        .end = min(end, p->end),
                        .flags = p->flags,
                        .desc = p->desc,
                        .parent = p->parent,
                };
        }

        read_unlock(&resource_lock);
        return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
                                 unsigned long flags, unsigned long desc,
                                 void *arg,
                                 int (*func)(struct resource *, void *))
{
        struct resource res;
        int ret = -EINVAL;

        while (start < end &&
               !find_next_iomem_res(start, end, flags, desc, &res)) {
                ret = (*func)(&res, arg);
                if (ret)
                        break;

                start = res.end + 1;
        }

        return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *                       with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
                        u64 end, void *arg, int (*func)(struct resource *, void *))
{
        return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
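
/*
 * Example (illustrative sketch): counting the matching ranges with a
 * callback. The callback returns non-zero to stop the walk early;
 * count_res() is a hypothetical name.
 *
 *      static int count_res(struct resource *res, void *arg)
 *      {
 *              (*(int *)arg)++;
 *              return 0;
 *      }
 *
 *      int n = 0;
 *
 *      walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, -1,
 *                          &n, count_res);
 */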

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
                        int (*func)(struct resource *, void *))
{
        unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
                                     func);
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
                 int (*func)(struct resource *, void *))
{
        unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
                                     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                          void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        resource_size_t start, end;
        unsigned long flags;
        struct resource res;
        unsigned long pfn, end_pfn;
        int ret = -EINVAL;

        start = (u64) start_pfn << PAGE_SHIFT;
        end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
        flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        while (start < end &&
               !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
                pfn = PFN_UP(res.start);
                end_pfn = PFN_DOWN(res.end + 1);
                if (end_pfn > pfn)
                        ret = (*func)(pfn, end_pfn - pfn, arg);
                if (ret)
                        break;
                start = res.end + 1;
        }
        return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        return 1;
}

/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as System RAM in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
        return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
                               size_t size, unsigned long flags,
                               unsigned long desc)
{
        resource_size_t ostart, oend;
        int type = 0; int other = 0;
        struct resource *p, *dp;
        bool is_type, covered;
        struct resource res;

        res.start = start;
        res.end = start + size - 1;

        for (p = parent->child; p ; p = p->sibling) {
                if (!resource_overlaps(p, &res))
                        continue;
                is_type = (p->flags & flags) == flags &&
                        (desc == IORES_DESC_NONE || desc == p->desc);
                if (is_type) {
                        type++;
                        continue;
                }
                /*
                 * Continue to search in descendant resources, as the
                 * matched descendant resources may cover some ranges of 'p'.
                 *
                 * |------------- "CXL Window 0" ------------|
                 * |-- "System RAM" --|
                 *
                 * will behave similarly to the following fake resource
                 * tree when searching "System RAM".
                 *
                 * |-- "System RAM" --||-- "CXL Window 0a" --|
                 */
                covered = false;
                ostart = max(res.start, p->start);
                oend = min(res.end, p->end);
                for_each_resource(p, dp, false) {
                        if (!resource_overlaps(dp, &res))
                                continue;
                        is_type = (dp->flags & flags) == flags &&
                                (desc == IORES_DESC_NONE || desc == dp->desc);
                        if (is_type) {
                                type++;
                                /*
                                 * Range from 'ostart' to 'dp->start'
                                 * isn't covered by matched resource.
                                 */
                                if (dp->start > ostart)
                                        break;
                                if (dp->end >= oend) {
                                        covered = true;
                                        break;
                                }
                                /* Remove covered range */
                                ostart = max(ostart, dp->end + 1);
                        }
                }
                if (!covered)
                        other++;
        }

        if (type == 0)
                return REGION_DISJOINT;

        if (other == 0)
                return REGION_INTERSECTS;

        return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
                      unsigned long desc)
{
        int ret;

        read_lock(&resource_lock);
        ret = __region_intersects(&iomem_resource, start, size, flags, desc);
        read_unlock(&resource_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
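
/*
 * Example (illustrative): refusing to remap a range that touches System
 * RAM, in the style of memremap()-like callers; 'offset' and 'size' are
 * assumed to come from the caller.
 *
 *      if (region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *                            IORES_DESC_NONE) != REGION_DISJOINT)
 *              return -EINVAL;
 */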

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
                                             const struct resource *avail,
                                             resource_size_t size,
                                             resource_size_t align)
{
        return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
                          resource_size_t max)
{
        if (res->start < min)
                res->start = min;
        if (res->end > max)
                res->end = max;
}

/*
 * Find an empty slot in the resource tree with the given range and
 * alignment constraints.
 */
static int __find_resource(struct resource *root, struct resource *old,
                           struct resource *new,
                           resource_size_t size,
                           struct resource_constraint *constraint)
{
        struct resource *this = root->child;
        struct resource tmp = *new, avail, alloc;

        tmp.start = root->start;
        /*
         * Skip past an allocated resource that starts at 0, since the
         * assignment of this->start - 1 to tmp->end below would cause an
         * underflow.
         */
        if (this && this->start == root->start) {
                tmp.start = (this == old) ? old->start : this->end + 1;
                this = this->sibling;
        }
        for (;;) {
                if (this)
                        tmp.end = (this == old) ? this->end : this->start - 1;
                else
                        tmp.end = root->end;

                if (tmp.end < tmp.start)
                        goto next;

                resource_clip(&tmp, constraint->min, constraint->max);
                arch_remove_reservations(&tmp);

                /* Check for overflow after ALIGN() */
                avail.start = ALIGN(tmp.start, constraint->align);
                avail.end = tmp.end;
                avail.flags = new->flags & ~IORESOURCE_UNSET;
                if (avail.start >= tmp.start) {
                        alloc.flags = avail.flags;
                        alloc.start = constraint->alignf(constraint->alignf_data, &avail,
                                                         size, constraint->align);
                        alloc.end = alloc.start + size - 1;
                        if (alloc.start <= alloc.end &&
                            resource_contains(&avail, &alloc)) {
                                new->start = alloc.start;
                                new->end = alloc.end;
                                return 0;
                        }
                }

next:           if (!this || this->end == root->end)
                        break;

                if (this != old)
                        tmp.start = this->end + 1;
                this = this->sibling;
        }
        return -EBUSY;
}

/*
 * Find an empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
                         resource_size_t size,
                         struct resource_constraint *constraint)
{
        return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *                       The resource will be relocated if the new size cannot be
 *                       accommodated at the current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met
 */
static int reallocate_resource(struct resource *root, struct resource *old,
                               resource_size_t newsize,
                               struct resource_constraint *constraint)
{
        int err = 0;
        struct resource new = *old;
        struct resource *conflict;

        write_lock(&resource_lock);

        err = __find_resource(root, old, &new, newsize, constraint);
        if (err)
                goto out;

        if (resource_contains(&new, old)) {
                old->start = new.start;
                old->end = new.end;
                goto out;
        }

        if (old->child) {
                err = -EBUSY;
                goto out;
        }

        if (resource_contains(old, &new)) {
                old->start = new.start;
                old->end = new.end;
        } else {
                __release_resource(old, true);
                *old = new;
                conflict = __request_resource(root, old);
                BUG_ON(conflict);
        }
out:
        write_unlock(&resource_lock);
        return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *                     The resource will be reallocated with a new size if it was
 *                     already allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
                      resource_size_t size, resource_size_t min,
                      resource_size_t max, resource_size_t align,
                      resource_size_t (*alignf)(void *,
                                                const struct resource *,
                                                resource_size_t,
                                                resource_size_t),
                      void *alignf_data)
{
        int err;
        struct resource_constraint constraint;

        if (!alignf)
                alignf = simple_align_resource;

        constraint.min = min;
        constraint.max = max;
        constraint.align = align;
        constraint.alignf = alignf;
        constraint.alignf_data = alignf_data;

        if (new->parent) {
                /*
                 * The resource is already allocated; try reallocating with
                 * the new constraints.
                 */
                return reallocate_resource(root, new, size, &constraint);
        }

        write_lock(&resource_lock);
        err = find_resource(root, new, size, &constraint);
        if (err >= 0 && __request_resource(root, new))
                err = -EBUSY;
        write_unlock(&resource_lock);
        return err;
}

EXPORT_SYMBOL(allocate_resource);
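
/*
 * Example (hypothetical, names invented): asking for any free 4 KiB of
 * I/O memory below 4 GiB, 4 KiB aligned, using the default alignment
 * hook (alignf == NULL).
 *
 *      static struct resource foo_win = {
 *              .name   = "foo-win",
 *              .flags  = IORESOURCE_MEM,
 *      };
 *
 *      err = allocate_resource(&iomem_resource, &foo_win, 0x1000,
 *                              0, 0xffffffff, 0x1000, NULL, NULL);
 */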

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
        struct resource *res;

        read_lock(&resource_lock);
        for (res = root->child; res; res = res->sibling) {
                if (res->start == start)
                        break;
        }
        read_unlock(&resource_lock);

        return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *first, *next;

        for (;; parent = first) {
                first = __request_resource(parent, new);
                if (!first)
                        return first;

                if (first == parent)
                        return first;
                if (WARN_ON(first == new)) /* duplicated insertion */
                        return first;

                if ((first->start > new->start) || (first->end < new->end))
                        break;
                if ((first->start == new->start) && (first->end == new->end))
                        break;
        }

        for (next = first; ; next = next->sibling) {
                /* Partial overlap? Bad, and unfixable */
                if (next->start < new->start || next->end > new->end)
                        return next;
                if (!next->sibling)
                        break;
                if (next->sibling->start > new->end)
                        break;
        }

        new->parent = parent;
        new->sibling = next->sibling;
        new->child = first;

        next->sibling = NULL;
        for (next = first; next; next = next->sibling)
                next->parent = new;

        if (parent->child == first) {
                parent->child = new;
        } else {
                next = parent->child;
                while (next->sibling != first)
                        next = next->sibling;
                next->sibling = new;
        }
        return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        write_lock(&resource_lock);
        conflict = __insert_resource(parent, new);
        write_unlock(&resource_lock);
        return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
        struct resource *conflict;

        conflict = insert_resource_conflict(parent, new);
        return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
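
/*
 * Example (illustrative, producer-side; "fw-window" and the range are
 * invented): a firmware driver publishing a window it discovered.
 * Pre-existing entries that fit entirely inside the window become its
 * children instead of causing a failure.
 *
 *      static struct resource fw_win = {
 *              .name   = "fw-window",
 *              .start  = 0x80000000,
 *              .end    = 0x8fffffff,
 *              .flags  = IORESOURCE_MEM,
 *      };
 *
 *      if (insert_resource(&iomem_resource, &fw_win))
 *              pr_warn("fw-window could not be inserted\n");
 */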

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
        if (new->parent)
                return;

        write_lock(&resource_lock);
        for (;;) {
                struct resource *conflict;

                conflict = __insert_resource(root, new);
                if (!conflict)
                        break;
                if (conflict == root)
                        break;

                /* Ok, expand resource to cover the conflict, then try again .. */
                if (conflict->start < new->start)
                        new->start = conflict->start;
                if (conflict->end > new->end)
                        new->end = conflict->end;

                pr_info("Expanded resource %s due to conflict with %s\n",
                        new->name, conflict->name);
        }
        write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
        int retval;

        write_lock(&resource_lock);
        retval = __release_resource(old, false);
        write_unlock(&resource_lock);
        return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
                             resource_size_t size)
{
        struct resource *tmp, *parent = res->parent;
        resource_size_t end = start + size - 1;
        int result = -EBUSY;

        if (!parent)
                goto skip;

        if ((start < parent->start) || (end > parent->end))
                goto out;

        if (res->sibling && (res->sibling->start <= end))
                goto out;

        tmp = parent->child;
        if (tmp != res) {
                while (tmp->sibling != res)
                        tmp = tmp->sibling;
                if (start <= tmp->end)
                        goto out;
        }

skip:
        for (tmp = res->child; tmp; tmp = tmp->sibling)
                if ((tmp->start < start) || (tmp->end > end))
                        goto out;

        res->start = start;
        res->end = end;
        result = 0;

out:
        return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
                    resource_size_t size)
{
        int result;

        write_lock(&resource_lock);
        result = __adjust_resource(res, start, size);
        write_unlock(&resource_lock);
        return result;
}
EXPORT_SYMBOL(adjust_resource);
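
/*
 * Example (illustrative): growing a previously requested region 'res'
 * by one page while keeping its start; this fails with -EBUSY if a
 * sibling resource is in the way.
 *
 *      err = adjust_resource(res, res->start,
 *                            resource_size(res) + PAGE_SIZE);
 */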

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
                            resource_size_t end, const char *name)
{
        struct resource *parent = root;
        struct resource *conflict;
        struct resource *res = alloc_resource(GFP_ATOMIC);
        struct resource *next_res = NULL;
        int type = resource_type(root);

        if (!res)
                return;

        res->name = name;
        res->start = start;
        res->end = end;
        res->flags = type | IORESOURCE_BUSY;
        res->desc = IORES_DESC_NONE;

        while (1) {

                conflict = __request_resource(parent, res);
                if (!conflict) {
                        if (!next_res)
                                break;
                        res = next_res;
                        next_res = NULL;
                        continue;
                }

                /* conflict covered whole area */
                if (conflict->start <= res->start &&
                    conflict->end >= res->end) {
                        free_resource(res);
                        WARN_ON(next_res);
                        break;
                }

                /* failed, split and try again */
                if (conflict->start > res->start) {
                        end = res->end;
                        res->end = conflict->start - 1;
                        if (conflict->end < end) {
                                next_res = alloc_resource(GFP_ATOMIC);
                                if (!next_res) {
                                        free_resource(res);
                                        break;
                                }
                                next_res->name = name;
                                next_res->start = conflict->end + 1;
                                next_res->end = end;
                                next_res->flags = type | IORESOURCE_BUSY;
                                next_res->desc = IORES_DESC_NONE;
                        }
                } else {
                        res->start = conflict->end + 1;
                }
        }
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
                          resource_size_t end, const char *name)
{
        int abort = 0;

        write_lock(&resource_lock);
        if (root->start > start || root->end < end) {
                pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
                       (unsigned long long)start, (unsigned long long)end,
                       root);
                if (start > root->end || end < root->start)
                        abort = 1;
                else {
                        if (end > root->end)
                                end = root->end;
                        if (start < root->start)
                                start = root->start;
                        pr_err("fixing request to [0x%llx-0x%llx]\n",
                               (unsigned long long)start,
                               (unsigned long long)end);
                }
                dump_stack();
        }
        if (!abort)
                __reserve_region_with_split(root, start, end, name);
        write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
        switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
        case IORESOURCE_SIZEALIGN:
                return resource_size(res);
        case IORESOURCE_STARTALIGN:
                return res->start;
        default:
                return 0;
        }
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
        /* pairs with smp_store_release() in iomem_init_inode() */
        struct inode *inode = smp_load_acquire(&iomem_inode);

        /*
         * Check that the initialization has completed. Losing the race
         * is ok because it means drivers are claiming resources before
         * the fs_initcall level of init and thereby prevent
         * iomem_get_mapping() users from establishing mappings.
         */
        if (!inode)
                return;

        /*
         * The expectation is that the driver has successfully marked
         * the resource busy by this point, so devmem_is_allowed()
         * should start returning false; however, for performance this
         * does not iterate the entire resource range.
         */
        if (devmem_is_allowed(PHYS_PFN(res->start)) &&
            devmem_is_allowed(PHYS_PFN(res->end))) {
                /*
                 * *cringe* iomem=relaxed says "go ahead, what's the
                 * worst that can happen?"
                 */
                return;
        }

        unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
        /*
         * This function is only called from file open paths, hence it is
         * guaranteed that fs_initcalls have completed and there is no need
         * to check for NULL. But since revoke_iomem() can be called before
         * the initcall, we still need the barrier to appease checkers.
         */
        return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
                                   resource_size_t start, resource_size_t n,
                                   const char *name, int flags)
{
        DECLARE_WAITQUEUE(wait, current);

        res->name = name;
        res->start = start;
        res->end = start + n - 1;

        for (;;) {
                struct resource *conflict;

                res->flags = resource_type(parent) | resource_ext_type(parent);
                res->flags |= IORESOURCE_BUSY | flags;
                res->desc = parent->desc;

                conflict = __request_resource(parent, res);
                if (!conflict)
                        break;
                /*
                 * mm/hmm.c reserves physical addresses which then
                 * become unavailable to other users. Conflicts are
                 * not expected. Warn to aid debugging if encountered.
                 */
                if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
                        pr_warn("Unaddressable device %s %pR conflicts with %pR",
                                conflict->name, conflict, res);
                }
                if (conflict != parent) {
                        if (!(conflict->flags & IORESOURCE_BUSY)) {
                                parent = conflict;
                                continue;
                        }
                }
                if (conflict->flags & flags & IORESOURCE_MUXED) {
                        add_wait_queue(&muxed_resource_wait, &wait);
                        write_unlock(&resource_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule();
                        remove_wait_queue(&muxed_resource_wait, &wait);
                        write_lock(&resource_lock);
                        continue;
                }
                /* Uhhuh, that didn't work out.. */
                return -EBUSY;
        }

        return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
                                  resource_size_t start, resource_size_t n,
                                  const char *name, int flags)
{
        struct resource *res = alloc_resource(GFP_KERNEL);
        int ret;

        if (!res)
                return NULL;

        write_lock(&resource_lock);
        ret = __request_region_locked(res, parent, start, n, name, flags);
        write_unlock(&resource_lock);

        if (ret) {
                free_resource(res);
                return NULL;
        }

        if (parent == &iomem_resource)
                revoke_iomem(res);

        return res;
}
EXPORT_SYMBOL(__request_region);
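
/*
 * Example (typical consumer usage via the request_mem_region() and
 * release_mem_region() wrappers in <linux/ioport.h>; 'base', 'size' and
 * the "foo" name are assumed to come from the caller):
 *
 *      if (!request_mem_region(base, size, "foo"))
 *              return -EBUSY;
 *      ...
 *      release_mem_region(base, size);
 */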

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
                      resource_size_t n)
{
        struct resource **p;
        resource_size_t end;

        p = &parent->child;
        end = start + n - 1;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *res = *p;

                if (!res)
                        break;
                if (res->start <= start && res->end >= end) {
                        if (!(res->flags & IORESOURCE_BUSY)) {
                                p = &res->child;
                                continue;
                        }
                        if (res->start != start || res->end != end)
                                break;
                        *p = res->sibling;
                        write_unlock(&resource_lock);
                        if (res->flags & IORESOURCE_MUXED)
                                wake_up(&muxed_resource_wait);
                        free_resource(res);
                        return;
                }
                p = &res->sibling;
        }

        write_unlock(&resource_lock);

        pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
        struct resource *parent = &iomem_resource;
        struct resource *new_res = NULL;
        bool alloc_nofail = false;
        struct resource **p;
        struct resource *res;
        resource_size_t end;

        end = start + size - 1;
        if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
                return;

        /*
         * We free up quite a lot of memory on memory hotunplug (esp. the
         * memmap) just before releasing the region. This is highly unlikely
         * to fail - let's play safe and make it never fail, as the caller
         * cannot perform any error handling (e.g., trying to re-add memory
         * will fail similarly).
         */
retry:
        new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

        p = &parent->child;
        write_lock(&resource_lock);

        while ((res = *p)) {
                if (res->start >= end)
                        break;

                /* look for the next resource if this one does not fit */
                if (res->start > start || res->end < end) {
                        p = &res->sibling;
                        continue;
                }

                if (!(res->flags & IORESOURCE_MEM))
                        break;

                if (!(res->flags & IORESOURCE_BUSY)) {
                        p = &res->child;
                        continue;
                }

                /* found the target resource; let's adjust accordingly */
                if (res->start == start && res->end == end) {
                        /* free the whole entry */
                        *p = res->sibling;
                        free_resource(res);
                } else if (res->start == start && res->end != end) {
                        /* adjust the start */
                        WARN_ON_ONCE(__adjust_resource(res, end + 1,
                                                       res->end - end));
                } else if (res->start != start && res->end == end) {
                        /* adjust the end */
                        WARN_ON_ONCE(__adjust_resource(res, res->start,
                                                       start - res->start));
                } else {
                        /* split into two entries - we need a new resource */
                        if (!new_res) {
                                new_res = alloc_resource(GFP_ATOMIC);
                                if (!new_res) {
                                        alloc_nofail = true;
                                        write_unlock(&resource_lock);
                                        goto retry;
                                }
                        }
                        new_res->name = res->name;
                        new_res->start = end + 1;
                        new_res->end = res->end;
                        new_res->flags = res->flags;
                        new_res->desc = res->desc;
                        new_res->parent = res->parent;
                        new_res->sibling = res->sibling;
                        new_res->child = NULL;

                        if (WARN_ON_ONCE(__adjust_resource(res, res->start,
                                                           start - res->start)))
                                break;
                        res->sibling = new_res;
                        new_res = NULL;
                }

                break;
        }

        write_unlock(&resource_lock);
        free_resource(new_res);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
                                           struct resource *r2)
{
        /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
        return r1->flags == r2->flags && r1->end + 1 == r2->start &&
               r1->name == r2->name && r1->desc == r2->desc &&
               !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *                             merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., they might only
 * be relevant for DIMMs). Only resources that are marked mergeable, that
 * have the same parent, and that don't have any children are considered.
 * All mergeable resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
        const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        struct resource *cur;

        if (WARN_ON_ONCE((res->flags & flags) != flags))
                return;

        write_lock(&resource_lock);
        res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

        /* Try to merge with next item in the list. */
        cur = res->sibling;
        if (cur && system_ram_resources_mergeable(res, cur)) {
                res->end = cur->end;
                res->sibling = cur->sibling;
                free_resource(cur);
        }

        /* Try to merge with previous item in the list. */
        cur = res->parent->child;
        while (cur && cur->sibling != res)
                cur = cur->sibling;
        if (cur && system_ram_resources_mergeable(cur, res)) {
                cur->end = res->end;
                cur->sibling = res->sibling;
                free_resource(res);
        }
        write_unlock(&resource_lock);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
        struct resource **r = ptr;

        release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
                          struct resource *new)
{
        struct resource *conflict, **ptr;

        ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        *ptr = new;

        conflict = request_resource_conflict(root, new);
        if (conflict) {
                dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
                        new, conflict->name, conflict);
                devres_free(ptr);
                return -EBUSY;
        }

        devres_add(dev, ptr);
        return 0;
}
EXPORT_SYMBOL(devm_request_resource);
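
/*
 * Example (hypothetical probe path; foo_probe() and foo_res are invented
 * names, with foo_res assumed to be defined elsewhere in the driver):
 * the reservation is dropped automatically when the device is unbound.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              return devm_request_resource(&pdev->dev, &iomem_resource,
 *                                           &foo_res);
 *      }
 */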

static int devm_resource_match(struct device *dev, void *res, void *data)
{
        struct resource **ptr = res;

        return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
        WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
                               new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
        struct resource *parent;
        resource_size_t start;
        resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
        struct region_devres *this = res;

        __release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
        struct region_devres *this = res, *match = match_data;

        return this->parent == match->parent &&
               this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
                      resource_size_t start, resource_size_t n, const char *name)
{
        struct region_devres *dr = NULL;
        struct resource *res;

        dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
                          GFP_KERNEL);
        if (!dr)
                return NULL;

        dr->parent = parent;
        dr->start = start;
        dr->n = n;

        res = __request_region(parent, start, n, name, 0);
        if (res)
                devres_add(dev, dr);
        else
                devres_free(dr);

        return res;
}
EXPORT_SYMBOL(__devm_request_region);
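
/*
 * Example (typical usage via the devm_request_mem_region() wrapper in
 * <linux/ioport.h>; 'dev' and 'res' are assumed to come from the
 * caller's probe path):
 *
 *      if (!devm_request_mem_region(dev, res->start, resource_size(res),
 *                                   dev_name(dev)))
 *              return -EBUSY;
 */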

void __devm_release_region(struct device *dev, struct resource *parent,
                           resource_size_t start, resource_size_t n)
{
        struct region_devres match_data = { parent, start, n };

        __release_region(parent, start, n);
        WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
                               &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on the "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
        static int reserved;
        static struct resource reserve[MAXRESERVE];

        for (;;) {
                unsigned int io_start, io_num;
                int x = reserved;
                struct resource *parent;

                if (get_option(&str, &io_start) != 2)
                        break;
                if (get_option(&str, &io_num) == 0)
                        break;
                if (x < MAXRESERVE) {
                        struct resource *res = reserve + x;

                        /*
                         * If the region starts below 0x10000, we assume it's
                         * I/O port space; otherwise assume it's memory.
                         */
                        if (io_start < 0x10000) {
                                res->flags = IORESOURCE_IO;
                                parent = &ioport_resource;
                        } else {
                                res->flags = IORESOURCE_MEM;
                                parent = &iomem_resource;
                        }
                        res->name = "reserved";
                        res->start = io_start;
                        res->end = io_start + io_num - 1;
                        res->flags |= IORESOURCE_BUSY;
                        res->desc = IORES_DESC_NONE;
                        res->child = NULL;
                        if (request_resource(parent, res) == 0)
                                reserved = x + 1;
                }
        }
        return 1;
}
__setup("reserve=", reserve_setup);
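
/*
 * Example (command line; the addresses are illustrative): given the
 * heuristic above, "reserve=0x300,32" marks I/O ports 0x300-0x31f busy,
 * while "reserve=0xfed00000,0x1000" reserves the 4 KiB memory range
 * 0xfed00000-0xfed00fff.
 */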
1679
1680 /*
1681 * Check if the requested addr and size spans more than any slot in the
1682 * iomem resource tree.
1683 */
iomem_map_sanity_check(resource_size_t addr,unsigned long size)1684 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1685 {
1686 struct resource *p = &iomem_resource;
1687 resource_size_t end = addr + size - 1;
1688 int err = 0;
1689 loff_t l;
1690
1691 read_lock(&resource_lock);
1692 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1693 /*
1694 * We can probably skip the resources without
1695 * IORESOURCE_IO attribute?
1696 */
1697 if (p->start > end)
1698 continue;
1699 if (p->end < addr)
1700 continue;
1701 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1702 PFN_DOWN(p->end) >= PFN_DOWN(end))
1703 continue;
1704 /*
1705 * if a resource is "BUSY", it's not a hardware resource
1706 * but a driver mapping of such a resource; we don't want
1707 * to warn for those; some drivers legitimately map only
1708 * partial hardware resources. (example: vesafb)
1709 */
1710 if (p->flags & IORESOURCE_BUSY)
1711 continue;
1712
1713 pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1714 &addr, &end, p->name, p);
1715 err = -1;
1716 break;
1717 }
1718 read_unlock(&resource_lock);
1719
1720 return err;
1721 }
1722
1723 #ifdef CONFIG_STRICT_DEVMEM
1724 static int strict_iomem_checks = 1;
1725 #else
1726 static int strict_iomem_checks;
1727 #endif
1728
1729 /*
1730 * Check if an address is exclusive to the kernel and must not be mapped to
1731 * user space, for example, via /dev/mem.
1732 *
1733 * Returns true if exclusive to the kernel, otherwise returns false.
1734 */
resource_is_exclusive(struct resource * root,u64 addr,resource_size_t size)1735 bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1736 {
1737 const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1738 IORESOURCE_EXCLUSIVE;
1739 bool skip_children = false, err = false;
1740 struct resource *p;
1741
1742 read_lock(&resource_lock);
1743 for_each_resource(root, p, skip_children) {
1744 if (p->start >= addr + size)
1745 break;
1746 if (p->end < addr) {
1747 skip_children = true;
1748 continue;
1749 }
1750 skip_children = false;
1751
1752 /*
1753 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1754 * IORESOURCE_EXCLUSIVE is set, even if they
1755 * are not busy and even if "iomem=relaxed" is set. The
1756 * responsible driver dynamically adds/removes system RAM within
1757 * such an area and uncontrolled access is dangerous.
1758 */
1759 if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1760 err = true;
1761 break;
1762 }
1763
1764 /*
1765 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
1766 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
1767 * resource is busy.
1768 */
1769 if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1770 continue;
1771 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1772 || p->flags & IORESOURCE_EXCLUSIVE) {
1773 err = true;
1774 break;
1775 }
1776 }
1777 read_unlock(&resource_lock);
1778
1779 return err;
1780 }
1781
iomem_is_exclusive(u64 addr)1782 bool iomem_is_exclusive(u64 addr)
1783 {
1784 return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1785 PAGE_SIZE);
1786 }

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
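
/*
 * Illustrative sketch: building up and tearing down a resource list with
 * the helpers above. resource_list_add_tail() is a helper from
 * <linux/resource_ext.h>; "foo_collect" is a made-up name.
 *
 *	static int foo_collect(struct resource *res, struct list_head *head)
 *	{
 *		struct resource_entry *entry;
 *
 *		entry = resource_list_create_entry(res, 0);
 *		if (!entry)
 *			return -ENOMEM;
 *		resource_list_add_tail(entry, head);
 *		return 0;
 *	}
 *
 * A single resource_list_free(head) later releases every entry at once.
 */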

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end, PHYSMEM_END);
		return end - size + 1;
	}

	return ALIGN(base->start, align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascend case be careful that the last increment by
	 * @size did not wrap 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}
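
/*
 * Illustrative walk (made-up numbers): for a base spanning
 * [0x100000000, 0x1ffffffff] with size = align = 0x10000000 and
 * GFR_DESCENDING set, gfr_start() yields 0x1f0000000 and each gfr_next()
 * steps down by one alignment unit:
 *
 *	0x1f0000000, 0x1e0000000, ..., 0x100000000
 *
 * until gfr_continue() sees the candidate drop below base->start (or
 * wrap). Without GFR_DESCENDING, the walk starts at
 * ALIGN(base->start, align) and steps upward instead.
 */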

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
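
/*
 * Illustrative sketch (made-up driver code): claiming a free range for
 * device-private memory at probe time; "foo_probe" and SZ_1G are
 * arbitrary.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct resource *res;
 *
 *		res = devm_request_free_mem_region(dev, &iomem_resource, SZ_1G);
 *		if (IS_ERR(res))
 *			return PTR_ERR(res);
 *		// res->start..res->end can now back ZONE_DEVICE pages
 *		return 0;
 *	}
 *
 * The region is released through devres when the device is unbound;
 * request_free_mem_region() below is the non-devres variant.
 */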

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span
 * of @base that is not already claimed by a descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
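
/*
 * Illustrative sketch: a CXL-style caller carving address space out of a
 * host bridge window; "cxl_win" and SZ_256M are made-up values.
 *
 *	struct resource *res;
 *
 *	res = alloc_free_mem_region(cxl_win, SZ_256M, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * Unlike the request_free_mem_region() variants above, the new resource
 * is only inserted, not marked busy, so a driver can still claim it later.
 */
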
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}
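
/*
 * Wired up to the kernel command line via __setup("iomem=", strict_iomem)
 * at the bottom of this file: booting with "iomem=relaxed" disables the
 * strict checks, "iomem=strict" (the CONFIG_STRICT_DEVMEM default)
 * re-enables them.
 */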

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the now-initialized iomem revocation inode. Pairs with
	 * the smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);