// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
				  resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p, bool skip_children)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children) \
	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))

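/*
 * Example (editor's illustration, not part of the kernel source): walking
 * the whole iomem tree in pre-order with for_each_resource(). The helper
 * name dump_iomem_tree() is hypothetical; any such traversal must hold
 * resource_lock for reading, as the seq_file code below does.
 */
#if 0
static void dump_iomem_tree(void)
{
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false)
		pr_info("%pR (%s)\n", p, p->name ?: "<unnamed>");
	read_unlock(&resource_lock);
}
#endif
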
#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
		   depth * 2, "",
		   width, start,
		   width, end,
		   r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			     &ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

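/*
 * Example (editor's illustration, not part of the kernel source): a
 * hypothetical driver claiming a fixed MMIO window for its lifetime and
 * releasing it on teardown. The foo_* names and addresses are made up.
 */
#if 0
static struct resource foo_mmio = {
	.name	= "foo-mmio",
	.start	= 0xfed40000,
	.end	= 0xfed40fff,
	.flags	= IORESOURCE_MEM,
};

static int foo_attach(void)
{
	/* Fails with -EBUSY if any existing resource overlaps the range. */
	return request_resource(&iomem_resource, &foo_mmio);
}

static void foo_detach(void)
{
	release_resource(&foo_mmio);
}
#endif
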
/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap [@start..@end] and also match @flags
 * and @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);

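/*
 * Example (editor's illustration, not part of the kernel source): counting
 * how many ACPI table regions intersect a physical range. The callback and
 * the count_res() name are hypothetical; IORES_DESC_ACPI_TABLES and
 * walk_iomem_res_desc() are the real interfaces used.
 */
#if 0
static int count_res(struct resource *res, void *arg)
{
	int *count = arg;

	(*count)++;
	return 0;	/* a non-zero return would stop the walk */
}

static int count_acpi_table_regions(u64 start, u64 end)
{
	int count = 0;

	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM,
			    start, end, &count, count_res);
	return count;
}
#endif
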
/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * This function is only for System RAM; it deals with full ranges and not
 * PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY, in reverse order, i.e., from
 * higher to lower addresses.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
			    int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
	       (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
					     (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i].start = res.start;
		rams[i++].end = res.end;

		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, i.e.,
 * ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

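/*
 * Example (editor's illustration, not part of the kernel source): summing
 * the number of System RAM pages in a PFN range with a
 * walk_system_ram_range() callback. The count_ram_pages() and
 * ram_pages_in() names are made up; the callback signature (base PFN,
 * number of pages, opaque arg) is the real contract.
 */
#if 0
static int count_ram_pages(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;
}

static unsigned long ram_pages_in(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
	return total;
}
#endif
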
static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	struct resource res;
	int type = 0; int other = 0;
	struct resource *p;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (resource_overlaps(p, &res))
			is_type ? type++ : other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM, and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);

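/*
 * Example (editor's illustration, not part of the kernel source): a remap
 * helper refusing to map anything that touches System RAM. The
 * may_remap_range() name is hypothetical; checking for REGION_DISJOINT
 * against IORESOURCE_SYSTEM_RAM is the typical use of this interface.
 */
#if 0
static bool may_remap_range(resource_size_t start, size_t size)
{
	return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_DISJOINT;
}
#endif
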
void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
							 size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be
 *	accommodated in the current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 *
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);

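/*
 * Example (editor's illustration, not part of the kernel source): asking
 * the allocator for any free 64 KiB window below 4 GiB, 64 KiB aligned,
 * under iomem_resource. foo_window is a hypothetical caller-owned
 * descriptor; on success it has been inserted into the tree with its
 * start/end filled in.
 */
#if 0
static struct resource foo_window = {
	.name	= "foo-window",
	.flags	= IORESOURCE_MEM,
};

static int foo_alloc_window(void)
{
	return allocate_resource(&iomem_resource, &foo_window,
				 SZ_64K,	/* size */
				 0,		/* min */
				 SZ_4G - 1,	/* max */
				 SZ_64K,	/* align */
				 NULL, NULL);	/* default alignment hook */
}
#endif
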
/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, the conflicting resource if the resource can't be
 * inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);

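/*
 * Example (editor's illustration, not part of the kernel source): a
 * firmware glue layer publishing a reserved range it discovered. Unlike
 * request_resource(), insert_resource() succeeds even if existing entries
 * fall entirely inside the new range: they simply become its children.
 * The fw_reserved name and addresses are made up.
 */
#if 0
static struct resource fw_reserved = {
	.name	= "fw-reserved",
	.start	= 0x80000000,
	.end	= 0x803fffff,
	.flags	= IORESOURCE_MEM,
};

static int fw_publish_reserved(void)
{
	return insert_resource(&iomem_resource, &fw_reserved);
}
#endif
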
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

983
984static int __adjust_resource(struct resource *res, resource_size_t start,
985 resource_size_t size)
986{
987 struct resource *tmp, *parent = res->parent;
988 resource_size_t end = start + size - 1;
989 int result = -EBUSY;
990
991 if (!parent)
992 goto skip;
993
994 if ((start < parent->start) || (end > parent->end))
995 goto out;
996
997 if (res->sibling && (res->sibling->start <= end))
998 goto out;
999
1000 tmp = parent->child;
1001 if (tmp != res) {
1002 while (tmp->sibling != res)
1003 tmp = tmp->sibling;
1004 if (start <= tmp->end)
1005 goto out;
1006 }
1007
1008skip:
1009 for (tmp = res->child; tmp; tmp = tmp->sibling)
1010 if ((tmp->start < start) || (tmp->end > end))
1011 goto out;
1012
1013 res->start = start;
1014 res->end = end;
1015 result = 0;
1016
1017 out:
1018 return result;
1019}
1020
1021/**
1022 * adjust_resource - modify a resource's start and size
1023 * @res: resource to modify
1024 * @start: new start value
1025 * @size: new size
1026 *
1027 * Given an existing resource, change its start and size to match the
1028 * arguments. Returns 0 on success, -EBUSY if it can't fit.
1029 * Existing children of the resource are assumed to be immutable.
1030 */
1031int adjust_resource(struct resource *res, resource_size_t start,
1032 resource_size_t size)
1033{
1034 int result;
1035
1036 write_lock(&resource_lock);
1037 result = __adjust_resource(res, start, size);
1038 write_unlock(&resource_lock);
1039 return result;
1040}
1041EXPORT_SYMBOL(adjust_resource);
1042
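/*
 * Example (editor's illustration, not part of the kernel source): growing
 * an already-requested region by one page. Note that adjust_resource()
 * takes a new start and a new size, not a new end, and fails with -EBUSY
 * if siblings or the parent leave no room. foo_grow_by_one_page() is a
 * made-up name.
 */
#if 0
static int foo_grow_by_one_page(struct resource *foo_res)
{
	return adjust_resource(foo_res, foo_res->start,
			       resource_size(foo_res) + PAGE_SIZE);
}
#endif
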
static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which in turn prevents
	 * iomem_get_mapping() users from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);

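/*
 * Example (editor's illustration, not part of the kernel source): the
 * classic driver pattern built on top of __request_region(). The
 * request_mem_region()/release_mem_region() macros from <linux/ioport.h>
 * wrap __request_region()/__release_region() with iomem_resource as the
 * parent. The foo_map_hw() name is made up.
 */
#if 0
static void __iomem *foo_map_hw(resource_size_t base, resource_size_t len)
{
	void __iomem *regs;

	if (!request_mem_region(base, len, "foo"))
		return NULL;	/* somebody else owns the range */

	regs = ioremap(base, len);
	if (!regs)
		release_mem_region(base, len);
	return regs;
}
#endif
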
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp. the
	 * memmap), just before releasing the region. This is highly unlikely
	 * to fail - let's play safe and make it never fail as the caller
	 * cannot perform any error handling (e.g., trying to re-add memory
	 * will fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
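
/*
 * Example (editor's illustration, not part of the kernel source): a memory
 * hotplug driver handing its freshly added System RAM resource over to the
 * merge logic. After the call the pointer must be considered stale, as the
 * comment above warns. The foo_add_ram() name is made up.
 */
#if 0
static void foo_add_ram(struct resource *res)
{
	/* res was inserted as IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY */
	merge_system_ram_resource(res);
	/* do not touch res from here on - it may have been freed */
}
#endif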
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

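/*
 * Example (editor's illustration, not part of the kernel source): using the
 * device-managed variant from a probe path, so the reservation is dropped
 * automatically on driver unbind. The foo_* names and the port range are
 * hypothetical.
 */
#if 0
static struct resource foo_ports = {
	.name	= "foo-ports",
	.start	= 0x0378,
	.end	= 0x037f,
	.flags	= IORESOURCE_IO,
};

static int foo_probe(struct device *dev)
{
	/* Released automatically when dev is unbound. */
	return devm_request_resource(dev, &ioport_resource, &foo_ports);
}
#endif
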
static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
		    || p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

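/*
 * Example (editor's illustration, not part of the kernel source): building
 * a temporary resource list, as host-bridge drivers do, and freeing it in
 * one call. resource_list_add_tail() and resource_list_for_each_entry()
 * come from <linux/resource_ext.h>; the window resource and the
 * foo_collect_windows() name are hypothetical.
 */
#if 0
static void foo_collect_windows(struct resource *window)
{
	LIST_HEAD(list);
	struct resource_entry *entry;

	entry = resource_list_create_entry(window, 0);
	if (!entry)
		return;
	resource_list_add_tail(entry, &list);

	resource_list_for_each_entry(entry, &list)
		pr_info("window: %pR\n", entry->res);

	resource_list_free(&list);	/* destroys every entry */
}
#endif
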
#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end,
			    (1ULL << MAX_PHYSMEM_BITS) - 1);
		return end - size + 1;
	}

	return ALIGN(base->start, align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascend case be careful that the last increment by
	 * @size did not wrap 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end,
			     (1ULL << MAX_PHYSMEM_BITS) - 1);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address big enough to
 * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
 * memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
					      struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
					 unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span of
 * @base that is not claimed by any descendant of @base.
 */
2005struct resource *alloc_free_mem_region(struct resource *base,
2006 unsigned long size, unsigned long align,
2007 const char *name)
2008{
2009 /* Default of ascending direction and insert resource */
2010 unsigned long flags = 0;
2011
2012 return get_free_mem_region(NULL, base, size, align, name,
2013 IORES_DESC_NONE, flags);
2014}
2015EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
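
/*
 * Example (an illustrative sketch loosely modeled on the CXL region
 * driver; "cxlrd_res" is a placeholder for a CXL window resource):
 * carve a 256M, 256M-aligned range out of the window and insert it as
 * a child resource:
 *
 *	struct resource *res;
 *
 *	res = alloc_free_mem_region(cxlrd_res, SZ_256M, SZ_256M,
 *				    "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * On success the new resource is already inserted below @base; undoing
 * the allocation is a remove_resource() followed by freeing the entry.
 */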
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}
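
/*
 * strict_iomem() is registered for the "iomem=" kernel parameter via
 * __setup() at the end of this file; see also
 * Documentation/admin-guide/kernel-parameters.txt:
 *
 *	iomem=relaxed	relax the checks on /dev/mem access to busy
 *			resources
 *	iomem=strict	enforce them (the default when
 *			CONFIG_STRICT_DEVMEM is enabled)
 */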

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
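
/*
 * Note: this pseudo-filesystem exists only to host the single anonymous
 * inode allocated below. iomem_get_mapping() hands out that inode's
 * address_space so that userspace mappings of iomem (e.g. via /dev/mem)
 * are all tracked in one place, and revoke_iomem() can zap them with a
 * single unmap_mapping_range() call when a driver claims a region.
 */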

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the fully initialized iomem revocation inode.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/resource.c
4 *
5 * Copyright (C) 1999 Linus Torvalds
6 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
7 *
8 * Arbitrary resource management.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/export.h>
14#include <linux/errno.h>
15#include <linux/ioport.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <linux/proc_fs.h>
21#include <linux/pseudo_fs.h>
22#include <linux/sched.h>
23#include <linux/seq_file.h>
24#include <linux/device.h>
25#include <linux/pfn.h>
26#include <linux/mm.h>
27#include <linux/mount.h>
28#include <linux/resource_ext.h>
29#include <uapi/linux/magic.h>
30#include <linux/string.h>
31#include <linux/vmalloc.h>
32#include <asm/io.h>
33
34
35struct resource ioport_resource = {
36 .name = "PCI IO",
37 .start = 0,
38 .end = IO_SPACE_LIMIT,
39 .flags = IORESOURCE_IO,
40};
41EXPORT_SYMBOL(ioport_resource);
42
43struct resource iomem_resource = {
44 .name = "PCI mem",
45 .start = 0,
46 .end = -1,
47 .flags = IORESOURCE_MEM,
48};
49EXPORT_SYMBOL(iomem_resource);
50
51static DEFINE_RWLOCK(resource_lock);
52
53/*
54 * Return the next node of @p in pre-order tree traversal. If
55 * @skip_children is true, skip the descendant nodes of @p in
56 * traversal. If @p is a descendant of @subtree_root, only traverse
57 * the subtree under @subtree_root.
58 */
59static struct resource *next_resource(struct resource *p, bool skip_children,
60 struct resource *subtree_root)
61{
62 if (!skip_children && p->child)
63 return p->child;
64 while (!p->sibling && p->parent) {
65 p = p->parent;
66 if (p == subtree_root)
67 return NULL;
68 }
69 return p->sibling;
70}
71
72/*
73 * Traverse the resource subtree under @_root in pre-order, excluding
74 * @_root itself.
75 *
76 * NOTE: '__p' is introduced to avoid shadowing '_p' outside of loop.
77 * And it is referenced to avoid unused variable warning.
78 */
79#define for_each_resource(_root, _p, _skip_children) \
80 for (typeof(_root) __root = (_root), __p = _p = __root->child; \
81 __p && _p; _p = next_resource(_p, _skip_children, __root))
82
83#ifdef CONFIG_PROC_FS
84
85enum { MAX_IORES_LEVEL = 5 };
86
87static void *r_start(struct seq_file *m, loff_t *pos)
88 __acquires(resource_lock)
89{
90 struct resource *root = pde_data(file_inode(m->file));
91 struct resource *p;
92 loff_t l = *pos;
93
94 read_lock(&resource_lock);
95 for_each_resource(root, p, false) {
96 if (l-- == 0)
97 break;
98 }
99
100 return p;
101}
102
103static void *r_next(struct seq_file *m, void *v, loff_t *pos)
104{
105 struct resource *p = v;
106
107 (*pos)++;
108
109 return (void *)next_resource(p, false, NULL);
110}
111
112static void r_stop(struct seq_file *m, void *v)
113 __releases(resource_lock)
114{
115 read_unlock(&resource_lock);
116}
117
118static int r_show(struct seq_file *m, void *v)
119{
120 struct resource *root = pde_data(file_inode(m->file));
121 struct resource *r = v, *p;
122 unsigned long long start, end;
123 int width = root->end < 0x10000 ? 4 : 8;
124 int depth;
125
126 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
127 if (p->parent == root)
128 break;
129
130 if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
131 start = r->start;
132 end = r->end;
133 } else {
134 start = end = 0;
135 }
136
137 seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
138 depth * 2, "",
139 width, start,
140 width, end,
141 r->name ? r->name : "<BAD>");
142 return 0;
143}
144
145static const struct seq_operations resource_op = {
146 .start = r_start,
147 .next = r_next,
148 .stop = r_stop,
149 .show = r_show,
150};
151
152static int __init ioresources_init(void)
153{
154 proc_create_seq_data("ioports", 0, NULL, &resource_op,
155 &ioport_resource);
156 proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
157 return 0;
158}
159__initcall(ioresources_init);
160
161#endif /* CONFIG_PROC_FS */
162
163static void free_resource(struct resource *res)
164{
165 /**
166 * If the resource was allocated using memblock early during boot
167 * we'll leak it here: we can only return full pages back to the
168 * buddy and trying to be smart and reusing them eventually in
169 * alloc_resource() overcomplicates resource handling.
170 */
171 if (res && PageSlab(virt_to_head_page(res)))
172 kfree(res);
173}
174
175static struct resource *alloc_resource(gfp_t flags)
176{
177 return kzalloc(sizeof(struct resource), flags);
178}
179
180/* Return the conflict entry if you can't request it */
181static struct resource * __request_resource(struct resource *root, struct resource *new)
182{
183 resource_size_t start = new->start;
184 resource_size_t end = new->end;
185 struct resource *tmp, **p;
186
187 if (end < start)
188 return root;
189 if (start < root->start)
190 return root;
191 if (end > root->end)
192 return root;
193 p = &root->child;
194 for (;;) {
195 tmp = *p;
196 if (!tmp || tmp->start > end) {
197 new->sibling = tmp;
198 *p = new;
199 new->parent = root;
200 return NULL;
201 }
202 p = &tmp->sibling;
203 if (tmp->end < start)
204 continue;
205 return tmp;
206 }
207}
208
209static int __release_resource(struct resource *old, bool release_child)
210{
211 struct resource *tmp, **p, *chd;
212
213 p = &old->parent->child;
214 for (;;) {
215 tmp = *p;
216 if (!tmp)
217 break;
218 if (tmp == old) {
219 if (release_child || !(tmp->child)) {
220 *p = tmp->sibling;
221 } else {
222 for (chd = tmp->child;; chd = chd->sibling) {
223 chd->parent = tmp->parent;
224 if (!(chd->sibling))
225 break;
226 }
227 *p = tmp->child;
228 chd->sibling = tmp->sibling;
229 }
230 old->parent = NULL;
231 return 0;
232 }
233 p = &tmp->sibling;
234 }
235 return -EINVAL;
236}
237
238static void __release_child_resources(struct resource *r)
239{
240 struct resource *tmp, *p;
241 resource_size_t size;
242
243 p = r->child;
244 r->child = NULL;
245 while (p) {
246 tmp = p;
247 p = p->sibling;
248
249 tmp->parent = NULL;
250 tmp->sibling = NULL;
251 __release_child_resources(tmp);
252
253 printk(KERN_DEBUG "release child resource %pR\n", tmp);
254 /* need to restore size, and keep flags */
255 size = resource_size(tmp);
256 tmp->start = 0;
257 tmp->end = size - 1;
258 }
259}
260
261void release_child_resources(struct resource *r)
262{
263 write_lock(&resource_lock);
264 __release_child_resources(r);
265 write_unlock(&resource_lock);
266}
267
268/**
269 * request_resource_conflict - request and reserve an I/O or memory resource
270 * @root: root resource descriptor
271 * @new: resource descriptor desired by caller
272 *
273 * Returns 0 for success, conflict resource on error.
274 */
275struct resource *request_resource_conflict(struct resource *root, struct resource *new)
276{
277 struct resource *conflict;
278
279 write_lock(&resource_lock);
280 conflict = __request_resource(root, new);
281 write_unlock(&resource_lock);
282 return conflict;
283}
284
285/**
286 * request_resource - request and reserve an I/O or memory resource
287 * @root: root resource descriptor
288 * @new: resource descriptor desired by caller
289 *
290 * Returns 0 for success, negative error code on error.
291 */
292int request_resource(struct resource *root, struct resource *new)
293{
294 struct resource *conflict;
295
296 conflict = request_resource_conflict(root, new);
297 return conflict ? -EBUSY : 0;
298}
299
300EXPORT_SYMBOL(request_resource);
301
302/**
303 * release_resource - release a previously reserved resource
304 * @old: resource pointer
305 */
306int release_resource(struct resource *old)
307{
308 int retval;
309
310 write_lock(&resource_lock);
311 retval = __release_resource(old, true);
312 write_unlock(&resource_lock);
313 return retval;
314}
315
316EXPORT_SYMBOL(release_resource);
317
318static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
319{
320 return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
321}
322
323/**
324 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
325 * [@start..@end].
326 *
327 * If a resource is found, returns 0 and @*res is overwritten with the part
328 * of the resource that's within [@start..@end]; if none is found, returns
329 * -ENODEV. Returns -EINVAL for invalid parameters.
330 *
331 * @start: start address of the resource searched for
332 * @end: end address of same resource
333 * @flags: flags which the resource must have
334 * @desc: descriptor the resource must have
335 * @res: return ptr, if resource found
336 *
337 * The caller must specify @start, @end, @flags, and @desc
338 * (which may be IORES_DESC_NONE).
339 */
340static int find_next_iomem_res(resource_size_t start, resource_size_t end,
341 unsigned long flags, unsigned long desc,
342 struct resource *res)
343{
344 struct resource *p;
345
346 if (!res)
347 return -EINVAL;
348
349 if (start >= end)
350 return -EINVAL;
351
352 read_lock(&resource_lock);
353
354 for_each_resource(&iomem_resource, p, false) {
355 /* If we passed the resource we are looking for, stop */
356 if (p->start > end) {
357 p = NULL;
358 break;
359 }
360
361 /* Skip until we find a range that matches what we look for */
362 if (p->end < start)
363 continue;
364
365 /* Found a match, break */
366 if (is_type_match(p, flags, desc))
367 break;
368 }
369
370 if (p) {
371 /* copy data */
372 *res = (struct resource) {
373 .start = max(start, p->start),
374 .end = min(end, p->end),
375 .flags = p->flags,
376 .desc = p->desc,
377 .parent = p->parent,
378 };
379 }
380
381 read_unlock(&resource_lock);
382 return p ? 0 : -ENODEV;
383}
384
385static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
386 unsigned long flags, unsigned long desc,
387 void *arg,
388 int (*func)(struct resource *, void *))
389{
390 struct resource res;
391 int ret = -EINVAL;
392
393 while (start < end &&
394 !find_next_iomem_res(start, end, flags, desc, &res)) {
395 ret = (*func)(&res, arg);
396 if (ret)
397 break;
398
399 start = res.end + 1;
400 }
401
402 return ret;
403}
404
405/**
406 * walk_iomem_res_desc - Walks through iomem resources and calls func()
407 * with matching resource ranges.
408 * *
409 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
410 * @flags: I/O resource flags
411 * @start: start addr
412 * @end: end addr
413 * @arg: function argument for the callback @func
414 * @func: callback function that is called for each qualifying resource area
415 *
416 * All the memory ranges which overlap start,end and also match flags and
417 * desc are valid candidates.
418 *
419 * NOTE: For a new descriptor search, define a new IORES_DESC in
420 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
421 */
422int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
423 u64 end, void *arg, int (*func)(struct resource *, void *))
424{
425 return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
426}
427EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
428
429/*
430 * This function calls the @func callback against all memory ranges of type
431 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY.
432 * Now, this function is only for System RAM, it deals with full ranges and
433 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
434 * ranges.
435 */
436int walk_system_ram_res(u64 start, u64 end, void *arg,
437 int (*func)(struct resource *, void *))
438{
439 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
440
441 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
442 func);
443}
444
445/*
446 * This function, being a variant of walk_system_ram_res(), calls the @func
447 * callback against all memory ranges of type System RAM which are marked as
448 * IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY in reversed order, i.e., from
449 * higher to lower.
450 */
451int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
452 int (*func)(struct resource *, void *))
453{
454 struct resource res, *rams;
455 int rams_size = 16, i;
456 unsigned long flags;
457 int ret = -1;
458
459 /* create a list */
460 rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
461 if (!rams)
462 return ret;
463
464 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
465 i = 0;
466 while ((start < end) &&
467 (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
468 if (i >= rams_size) {
469 /* re-alloc */
470 struct resource *rams_new;
471
472 rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
473 GFP_KERNEL);
474 if (!rams_new)
475 goto out;
476
477 rams = rams_new;
478 rams_size += 16;
479 }
480
481 rams[i++] = res;
482 start = res.end + 1;
483 }
484
485 /* go reverse */
486 for (i--; i >= 0; i--) {
487 ret = (*func)(&rams[i], arg);
488 if (ret)
489 break;
490 }
491
492out:
493 kvfree(rams);
494 return ret;
495}
496
497/*
498 * This function calls the @func callback against all memory ranges, which
499 * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
500 */
501int walk_mem_res(u64 start, u64 end, void *arg,
502 int (*func)(struct resource *, void *))
503{
504 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
505
506 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
507 func);
508}
509
510/*
511 * This function calls the @func callback against all memory ranges of type
512 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY.
513 * It is to be used only for System RAM.
514 */
515int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
516 void *arg, int (*func)(unsigned long, unsigned long, void *))
517{
518 resource_size_t start, end;
519 unsigned long flags;
520 struct resource res;
521 unsigned long pfn, end_pfn;
522 int ret = -EINVAL;
523
524 start = (u64) start_pfn << PAGE_SHIFT;
525 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
526 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
527 while (start < end &&
528 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
529 pfn = PFN_UP(res.start);
530 end_pfn = PFN_DOWN(res.end + 1);
531 if (end_pfn > pfn)
532 ret = (*func)(pfn, end_pfn - pfn, arg);
533 if (ret)
534 break;
535 start = res.end + 1;
536 }
537 return ret;
538}
539
540static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
541{
542 return 1;
543}
544
545/*
546 * This generic page_is_ram() returns true if specified address is
547 * registered as System RAM in iomem_resource list.
548 */
549int __weak page_is_ram(unsigned long pfn)
550{
551 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
552}
553EXPORT_SYMBOL_GPL(page_is_ram);
554
555static int __region_intersects(struct resource *parent, resource_size_t start,
556 size_t size, unsigned long flags,
557 unsigned long desc)
558{
559 int type = 0; int other = 0;
560 struct resource *p, *dp;
561 struct resource res, o;
562 bool covered;
563
564 res.start = start;
565 res.end = start + size - 1;
566
567 for (p = parent->child; p ; p = p->sibling) {
568 if (!resource_intersection(p, &res, &o))
569 continue;
570 if (is_type_match(p, flags, desc)) {
571 type++;
572 continue;
573 }
574 /*
575 * Continue to search in descendant resources as if the
576 * matched descendant resources cover some ranges of 'p'.
577 *
578 * |------------- "CXL Window 0" ------------|
579 * |-- "System RAM" --|
580 *
581 * will behave similar as the following fake resource
582 * tree when searching "System RAM".
583 *
584 * |-- "System RAM" --||-- "CXL Window 0a" --|
585 */
586 covered = false;
587 for_each_resource(p, dp, false) {
588 if (!resource_overlaps(dp, &res))
589 continue;
590 if (is_type_match(dp, flags, desc)) {
591 type++;
592 /*
593 * Range from 'o.start' to 'dp->start'
594 * isn't covered by matched resource.
595 */
596 if (dp->start > o.start)
597 break;
598 if (dp->end >= o.end) {
599 covered = true;
600 break;
601 }
602 /* Remove covered range */
603 o.start = max(o.start, dp->end + 1);
604 }
605 }
606 if (!covered)
607 other++;
608 }
609
610 if (type == 0)
611 return REGION_DISJOINT;
612
613 if (other == 0)
614 return REGION_INTERSECTS;
615
616 return REGION_MIXED;
617}
618
619/**
620 * region_intersects() - determine intersection of region with known resources
621 * @start: region start address
622 * @size: size of region
623 * @flags: flags of resource (in iomem_resource)
624 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
625 *
626 * Check if the specified region partially overlaps or fully eclipses a
627 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
628 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
629 * return REGION_MIXED if the region overlaps @flags/@desc and another
630 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
631 * and no other defined resource. Note that REGION_INTERSECTS is also
632 * returned in the case when the specified region overlaps RAM and undefined
633 * memory holes.
634 *
635 * region_intersect() is used by memory remapping functions to ensure
636 * the user is not remapping RAM and is a vast speed up over walking
637 * through the resource table page by page.
638 */
639int region_intersects(resource_size_t start, size_t size, unsigned long flags,
640 unsigned long desc)
641{
642 int ret;
643
644 read_lock(&resource_lock);
645 ret = __region_intersects(&iomem_resource, start, size, flags, desc);
646 read_unlock(&resource_lock);
647
648 return ret;
649}
650EXPORT_SYMBOL_GPL(region_intersects);
651
652void __weak arch_remove_reservations(struct resource *avail)
653{
654}
655
656static void resource_clip(struct resource *res, resource_size_t min,
657 resource_size_t max)
658{
659 if (res->start < min)
660 res->start = min;
661 if (res->end > max)
662 res->end = max;
663}
664
665/*
666 * Find empty space in the resource tree with the given range and
667 * alignment constraints
668 */
669static int __find_resource_space(struct resource *root, struct resource *old,
670 struct resource *new, resource_size_t size,
671 struct resource_constraint *constraint)
672{
673 struct resource *this = root->child;
674 struct resource tmp = *new, avail, alloc;
675 resource_alignf alignf = constraint->alignf;
676
677 tmp.start = root->start;
678 /*
679 * Skip past an allocated resource that starts at 0, since the assignment
680 * of this->start - 1 to tmp->end below would cause an underflow.
681 */
682 if (this && this->start == root->start) {
683 tmp.start = (this == old) ? old->start : this->end + 1;
684 this = this->sibling;
685 }
686 for(;;) {
687 if (this)
688 tmp.end = (this == old) ? this->end : this->start - 1;
689 else
690 tmp.end = root->end;
691
692 if (tmp.end < tmp.start)
693 goto next;
694
695 resource_clip(&tmp, constraint->min, constraint->max);
696 arch_remove_reservations(&tmp);
697
698 /* Check for overflow after ALIGN() */
699 avail.start = ALIGN(tmp.start, constraint->align);
700 avail.end = tmp.end;
701 avail.flags = new->flags & ~IORESOURCE_UNSET;
702 if (avail.start >= tmp.start) {
703 alloc.flags = avail.flags;
704 if (alignf) {
705 alloc.start = alignf(constraint->alignf_data,
706 &avail, size, constraint->align);
707 } else {
708 alloc.start = avail.start;
709 }
710 alloc.end = alloc.start + size - 1;
711 if (alloc.start <= alloc.end &&
712 resource_contains(&avail, &alloc)) {
713 new->start = alloc.start;
714 new->end = alloc.end;
715 return 0;
716 }
717 }
718
719next: if (!this || this->end == root->end)
720 break;
721
722 if (this != old)
723 tmp.start = this->end + 1;
724 this = this->sibling;
725 }
726 return -EBUSY;
727}
728
729/**
730 * find_resource_space - Find empty space in the resource tree
731 * @root: Root resource descriptor
732 * @new: Resource descriptor awaiting an empty resource space
733 * @size: The minimum size of the empty space
734 * @constraint: The range and alignment constraints to be met
735 *
736 * Finds an empty space under @root in the resource tree satisfying range and
737 * alignment @constraints.
738 *
739 * Return:
740 * * %0 - if successful, @new members start, end, and flags are altered.
741 * * %-EBUSY - if no empty space was found.
742 */
743int find_resource_space(struct resource *root, struct resource *new,
744 resource_size_t size,
745 struct resource_constraint *constraint)
746{
747 return __find_resource_space(root, NULL, new, size, constraint);
748}
749EXPORT_SYMBOL_GPL(find_resource_space);
750
751/**
752 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
753 * The resource will be relocated if the new size cannot be reallocated in the
754 * current location.
755 *
756 * @root: root resource descriptor
757 * @old: resource descriptor desired by caller
758 * @newsize: new size of the resource descriptor
759 * @constraint: the memory range and alignment constraints to be met.
760 */
761static int reallocate_resource(struct resource *root, struct resource *old,
762 resource_size_t newsize,
763 struct resource_constraint *constraint)
764{
765 int err=0;
766 struct resource new = *old;
767 struct resource *conflict;
768
769 write_lock(&resource_lock);
770
771 if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
772 goto out;
773
774 if (resource_contains(&new, old)) {
775 old->start = new.start;
776 old->end = new.end;
777 goto out;
778 }
779
780 if (old->child) {
781 err = -EBUSY;
782 goto out;
783 }
784
785 if (resource_contains(old, &new)) {
786 old->start = new.start;
787 old->end = new.end;
788 } else {
789 __release_resource(old, true);
790 *old = new;
791 conflict = __request_resource(root, old);
792 BUG_ON(conflict);
793 }
794out:
795 write_unlock(&resource_lock);
796 return err;
797}
798
799
800/**
801 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
802 * The resource will be reallocated with a new size if it was already allocated
803 * @root: root resource descriptor
804 * @new: resource descriptor desired by caller
805 * @size: requested resource region size
806 * @min: minimum boundary to allocate
807 * @max: maximum boundary to allocate
808 * @align: alignment requested, in bytes
809 * @alignf: alignment function, optional, called if not NULL
810 * @alignf_data: arbitrary data to pass to the @alignf function
811 */
812int allocate_resource(struct resource *root, struct resource *new,
813 resource_size_t size, resource_size_t min,
814 resource_size_t max, resource_size_t align,
815 resource_alignf alignf,
816 void *alignf_data)
817{
818 int err;
819 struct resource_constraint constraint;
820
821 constraint.min = min;
822 constraint.max = max;
823 constraint.align = align;
824 constraint.alignf = alignf;
825 constraint.alignf_data = alignf_data;
826
827 if ( new->parent ) {
828 /* resource is already allocated, try reallocating with
829 the new constraints */
830 return reallocate_resource(root, new, size, &constraint);
831 }
832
833 write_lock(&resource_lock);
834 err = find_resource_space(root, new, size, &constraint);
835 if (err >= 0 && __request_resource(root, new))
836 err = -EBUSY;
837 write_unlock(&resource_lock);
838 return err;
839}
840
841EXPORT_SYMBOL(allocate_resource);
842
843/**
844 * lookup_resource - find an existing resource by a resource start address
845 * @root: root resource descriptor
846 * @start: resource start address
847 *
848 * Returns a pointer to the resource if found, NULL otherwise
849 */
850struct resource *lookup_resource(struct resource *root, resource_size_t start)
851{
852 struct resource *res;
853
854 read_lock(&resource_lock);
855 for (res = root->child; res; res = res->sibling) {
856 if (res->start == start)
857 break;
858 }
859 read_unlock(&resource_lock);
860
861 return res;
862}
863
864/*
865 * Insert a resource into the resource tree. If successful, return NULL,
866 * otherwise return the conflicting resource (compare to __request_resource())
867 */
868static struct resource * __insert_resource(struct resource *parent, struct resource *new)
869{
870 struct resource *first, *next;
871
872 for (;; parent = first) {
873 first = __request_resource(parent, new);
874 if (!first)
875 return first;
876
877 if (first == parent)
878 return first;
879 if (WARN_ON(first == new)) /* duplicated insertion */
880 return first;
881
882 if ((first->start > new->start) || (first->end < new->end))
883 break;
884 if ((first->start == new->start) && (first->end == new->end))
885 break;
886 }
887
888 for (next = first; ; next = next->sibling) {
889 /* Partial overlap? Bad, and unfixable */
890 if (next->start < new->start || next->end > new->end)
891 return next;
892 if (!next->sibling)
893 break;
894 if (next->sibling->start > new->end)
895 break;
896 }
897
898 new->parent = parent;
899 new->sibling = next->sibling;
900 new->child = first;
901
902 next->sibling = NULL;
903 for (next = first; next; next = next->sibling)
904 next->parent = new;
905
906 if (parent->child == first) {
907 parent->child = new;
908 } else {
909 next = parent->child;
910 while (next->sibling != first)
911 next = next->sibling;
912 next->sibling = new;
913 }
914 return NULL;
915}
916
917/**
918 * insert_resource_conflict - Inserts resource in the resource tree
919 * @parent: parent of the new resource
920 * @new: new resource to insert
921 *
922 * Returns 0 on success, conflict resource if the resource can't be inserted.
923 *
924 * This function is equivalent to request_resource_conflict when no conflict
925 * happens. If a conflict happens, and the conflicting resources
926 * entirely fit within the range of the new resource, then the new
927 * resource is inserted and the conflicting resources become children of
928 * the new resource.
929 *
930 * This function is intended for producers of resources, such as FW modules
931 * and bus drivers.
932 */
933struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
934{
935 struct resource *conflict;
936
937 write_lock(&resource_lock);
938 conflict = __insert_resource(parent, new);
939 write_unlock(&resource_lock);
940 return conflict;
941}
942
943/**
944 * insert_resource - Inserts a resource in the resource tree
945 * @parent: parent of the new resource
946 * @new: new resource to insert
947 *
948 * Returns 0 on success, -EBUSY if the resource can't be inserted.
949 *
950 * This function is intended for producers of resources, such as FW modules
951 * and bus drivers.
952 */
953int insert_resource(struct resource *parent, struct resource *new)
954{
955 struct resource *conflict;
956
957 conflict = insert_resource_conflict(parent, new);
958 return conflict ? -EBUSY : 0;
959}
960EXPORT_SYMBOL_GPL(insert_resource);
961
962/**
963 * insert_resource_expand_to_fit - Insert a resource into the resource tree
964 * @root: root resource descriptor
965 * @new: new resource to insert
966 *
967 * Insert a resource into the resource tree, possibly expanding it in order
968 * to make it encompass any conflicting resources.
969 */
970void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
971{
972 if (new->parent)
973 return;
974
975 write_lock(&resource_lock);
976 for (;;) {
977 struct resource *conflict;
978
979 conflict = __insert_resource(root, new);
980 if (!conflict)
981 break;
982 if (conflict == root)
983 break;
984
985 /* Ok, expand resource to cover the conflict, then try again .. */
986 if (conflict->start < new->start)
987 new->start = conflict->start;
988 if (conflict->end > new->end)
989 new->end = conflict->end;
990
991 pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
992 }
993 write_unlock(&resource_lock);
994}
995/*
996 * Not for general consumption, only early boot memory map parsing, PCI
997 * resource discovery, and late discovery of CXL resources are expected
998 * to use this interface. The former are built-in and only the latter,
999 * CXL, is a module.
1000 */
1001EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");
1002
1003/**
1004 * remove_resource - Remove a resource in the resource tree
1005 * @old: resource to remove
1006 *
1007 * Returns 0 on success, -EINVAL if the resource is not valid.
1008 *
1009 * This function removes a resource previously inserted by insert_resource()
1010 * or insert_resource_conflict(), and moves the children (if any) up to
1011 * where they were before. insert_resource() and insert_resource_conflict()
1012 * insert a new resource, and move any conflicting resources down to the
1013 * children of the new resource.
1014 *
1015 * insert_resource(), insert_resource_conflict() and remove_resource() are
1016 * intended for producers of resources, such as FW modules and bus drivers.
1017 */
1018int remove_resource(struct resource *old)
1019{
1020 int retval;
1021
1022 write_lock(&resource_lock);
1023 retval = __release_resource(old, false);
1024 write_unlock(&resource_lock);
1025 return retval;
1026}
1027EXPORT_SYMBOL_GPL(remove_resource);
1028
1029static int __adjust_resource(struct resource *res, resource_size_t start,
1030 resource_size_t size)
1031{
1032 struct resource *tmp, *parent = res->parent;
1033 resource_size_t end = start + size - 1;
1034 int result = -EBUSY;
1035
1036 if (!parent)
1037 goto skip;
1038
1039 if ((start < parent->start) || (end > parent->end))
1040 goto out;
1041
1042 if (res->sibling && (res->sibling->start <= end))
1043 goto out;
1044
1045 tmp = parent->child;
1046 if (tmp != res) {
1047 while (tmp->sibling != res)
1048 tmp = tmp->sibling;
1049 if (start <= tmp->end)
1050 goto out;
1051 }
1052
1053skip:
1054 for (tmp = res->child; tmp; tmp = tmp->sibling)
1055 if ((tmp->start < start) || (tmp->end > end))
1056 goto out;
1057
1058 res->start = start;
1059 res->end = end;
1060 result = 0;
1061
1062 out:
1063 return result;
1064}
1065
1066/**
1067 * adjust_resource - modify a resource's start and size
1068 * @res: resource to modify
1069 * @start: new start value
1070 * @size: new size
1071 *
1072 * Given an existing resource, change its start and size to match the
1073 * arguments. Returns 0 on success, -EBUSY if it can't fit.
1074 * Existing children of the resource are assumed to be immutable.
1075 */
1076int adjust_resource(struct resource *res, resource_size_t start,
1077 resource_size_t size)
1078{
1079 int result;
1080
1081 write_lock(&resource_lock);
1082 result = __adjust_resource(res, start, size);
1083 write_unlock(&resource_lock);
1084 return result;
1085}
1086EXPORT_SYMBOL(adjust_resource);
1087
1088static void __init
1089__reserve_region_with_split(struct resource *root, resource_size_t start,
1090 resource_size_t end, const char *name)
1091{
1092 struct resource *parent = root;
1093 struct resource *conflict;
1094 struct resource *res = alloc_resource(GFP_ATOMIC);
1095 struct resource *next_res = NULL;
1096 int type = resource_type(root);
1097
1098 if (!res)
1099 return;
1100
1101 res->name = name;
1102 res->start = start;
1103 res->end = end;
1104 res->flags = type | IORESOURCE_BUSY;
1105 res->desc = IORES_DESC_NONE;
1106
1107 while (1) {
1108
1109 conflict = __request_resource(parent, res);
1110 if (!conflict) {
1111 if (!next_res)
1112 break;
1113 res = next_res;
1114 next_res = NULL;
1115 continue;
1116 }
1117
1118 /* conflict covered whole area */
1119 if (conflict->start <= res->start &&
1120 conflict->end >= res->end) {
1121 free_resource(res);
1122 WARN_ON(next_res);
1123 break;
1124 }
1125
1126 /* failed, split and try again */
1127 if (conflict->start > res->start) {
1128 end = res->end;
1129 res->end = conflict->start - 1;
1130 if (conflict->end < end) {
1131 next_res = alloc_resource(GFP_ATOMIC);
1132 if (!next_res) {
1133 free_resource(res);
1134 break;
1135 }
1136 next_res->name = name;
1137 next_res->start = conflict->end + 1;
1138 next_res->end = end;
1139 next_res->flags = type | IORESOURCE_BUSY;
1140 next_res->desc = IORES_DESC_NONE;
1141 }
1142 } else {
1143 res->start = conflict->end + 1;
1144 }
1145 }
1146
1147}
1148
1149void __init
1150reserve_region_with_split(struct resource *root, resource_size_t start,
1151 resource_size_t end, const char *name)
1152{
1153 int abort = 0;
1154
1155 write_lock(&resource_lock);
1156 if (root->start > start || root->end < end) {
1157 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1158 (unsigned long long)start, (unsigned long long)end,
1159 root);
1160 if (start > root->end || end < root->start)
1161 abort = 1;
1162 else {
1163 if (end > root->end)
1164 end = root->end;
1165 if (start < root->start)
1166 start = root->start;
1167 pr_err("fixing request to [0x%llx-0x%llx]\n",
1168 (unsigned long long)start,
1169 (unsigned long long)end);
1170 }
1171 dump_stack();
1172 }
1173 if (!abort)
1174 __reserve_region_with_split(root, start, end, name);
1175 write_unlock(&resource_lock);
1176}
1177
1178/**
1179 * resource_alignment - calculate resource's alignment
1180 * @res: resource pointer
1181 *
1182 * Returns alignment on success, 0 (invalid alignment) on failure.
1183 */
1184resource_size_t resource_alignment(struct resource *res)
1185{
1186 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1187 case IORESOURCE_SIZEALIGN:
1188 return resource_size(res);
1189 case IORESOURCE_STARTALIGN:
1190 return res->start;
1191 default:
1192 return 0;
1193 }
1194}
1195
1196/*
1197 * This is compatibility stuff for IO resources.
1198 *
1199 * Note how this, unlike the above, knows about
1200 * the IO flag meanings (busy etc).
1201 *
1202 * request_region creates a new busy region.
1203 *
1204 * release_region releases a matching busy region.
1205 */
1206
1207static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1208
1209static struct inode *iomem_inode;
1210
1211#ifdef CONFIG_IO_STRICT_DEVMEM
1212static void revoke_iomem(struct resource *res)
1213{
1214 /* pairs with smp_store_release() in iomem_init_inode() */
1215 struct inode *inode = smp_load_acquire(&iomem_inode);
1216
1217 /*
1218 * Check that the initialization has completed. Losing the race
1219 * is ok because it means drivers are claiming resources before
1220 * the fs_initcall level of init and prevent iomem_get_mapping users
1221 * from establishing mappings.
1222 */
1223 if (!inode)
1224 return;
1225
1226 /*
1227 * The expectation is that the driver has successfully marked
1228 * the resource busy by this point, so devmem_is_allowed()
1229 * should start returning false, however for performance this
1230 * does not iterate the entire resource range.
1231 */
1232 if (devmem_is_allowed(PHYS_PFN(res->start)) &&
1233 devmem_is_allowed(PHYS_PFN(res->end))) {
1234 /*
1235 * *cringe* iomem=relaxed says "go ahead, what's the
1236 * worst that can happen?"
1237 */
1238 return;
1239 }
1240
1241 unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
1242}
1243#else
1244static void revoke_iomem(struct resource *res) {}
1245#endif
1246
1247struct address_space *iomem_get_mapping(void)
1248{
1249 /*
1250 * This function is only called from file open paths, hence guaranteed
1251 * that fs_initcalls have completed and no need to check for NULL. But
1252 * since revoke_iomem can be called before the initcall we still need
1253 * the barrier to appease checkers.
1254 */
1255 return smp_load_acquire(&iomem_inode)->i_mapping;
1256}
1257
1258static int __request_region_locked(struct resource *res, struct resource *parent,
1259 resource_size_t start, resource_size_t n,
1260 const char *name, int flags)
1261{
1262 DECLARE_WAITQUEUE(wait, current);
1263
1264 res->name = name;
1265 res->start = start;
1266 res->end = start + n - 1;
1267
1268 for (;;) {
1269 struct resource *conflict;
1270
1271 res->flags = resource_type(parent) | resource_ext_type(parent);
1272 res->flags |= IORESOURCE_BUSY | flags;
1273 res->desc = parent->desc;
1274
1275 conflict = __request_resource(parent, res);
1276 if (!conflict)
1277 break;
1278 /*
1279 * mm/hmm.c reserves physical addresses which then
1280 * become unavailable to other users. Conflicts are
1281 * not expected. Warn to aid debugging if encountered.
1282 */
1283 if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1284 pr_warn("Unaddressable device %s %pR conflicts with %pR",
1285 conflict->name, conflict, res);
1286 }
1287 if (conflict != parent) {
1288 if (!(conflict->flags & IORESOURCE_BUSY)) {
1289 parent = conflict;
1290 continue;
1291 }
1292 }
1293 if (conflict->flags & flags & IORESOURCE_MUXED) {
1294 add_wait_queue(&muxed_resource_wait, &wait);
1295 write_unlock(&resource_lock);
1296 set_current_state(TASK_UNINTERRUPTIBLE);
1297 schedule();
1298 remove_wait_queue(&muxed_resource_wait, &wait);
1299 write_lock(&resource_lock);
1300 continue;
1301 }
1302 /* Uhhuh, that didn't work out.. */
1303 return -EBUSY;
1304 }
1305
1306 return 0;
1307}
1308
1309/**
1310 * __request_region - create a new busy resource region
1311 * @parent: parent resource descriptor
1312 * @start: resource start address
1313 * @n: resource region size
1314 * @name: reserving caller's ID string
1315 * @flags: IO resource flags
1316 */
1317struct resource *__request_region(struct resource *parent,
1318 resource_size_t start, resource_size_t n,
1319 const char *name, int flags)
1320{
1321 struct resource *res = alloc_resource(GFP_KERNEL);
1322 int ret;
1323
1324 if (!res)
1325 return NULL;
1326
1327 write_lock(&resource_lock);
1328 ret = __request_region_locked(res, parent, start, n, name, flags);
1329 write_unlock(&resource_lock);
1330
1331 if (ret) {
1332 free_resource(res);
1333 return NULL;
1334 }
1335
1336 if (parent == &iomem_resource)
1337 revoke_iomem(res);
1338
1339 return res;
1340}
1341EXPORT_SYMBOL(__request_region);
1342
1343/**
1344 * __release_region - release a previously reserved resource region
1345 * @parent: parent resource descriptor
1346 * @start: resource start address
1347 * @n: resource region size
1348 *
1349 * The described resource region must match a currently busy region.
1350 */
1351void __release_region(struct resource *parent, resource_size_t start,
1352 resource_size_t n)
1353{
1354 struct resource **p;
1355 resource_size_t end;
1356
1357 p = &parent->child;
1358 end = start + n - 1;
1359
1360 write_lock(&resource_lock);
1361
1362 for (;;) {
1363 struct resource *res = *p;
1364
1365 if (!res)
1366 break;
1367 if (res->start <= start && res->end >= end) {
1368 if (!(res->flags & IORESOURCE_BUSY)) {
1369 p = &res->child;
1370 continue;
1371 }
1372 if (res->start != start || res->end != end)
1373 break;
1374 *p = res->sibling;
1375 write_unlock(&resource_lock);
1376 if (res->flags & IORESOURCE_MUXED)
1377 wake_up(&muxed_resource_wait);
1378 free_resource(res);
1379 return;
1380 }
1381 p = &res->sibling;
1382 }
1383
1384 write_unlock(&resource_lock);
1385
1386 pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
1387}
1388EXPORT_SYMBOL(__release_region);
1389
1390#ifdef CONFIG_MEMORY_HOTREMOVE
1391/**
1392 * release_mem_region_adjustable - release a previously reserved memory region
1393 * @start: resource start address
1394 * @size: resource region size
1395 *
1396 * This interface is intended for memory hot-delete. The requested region
1397 * is released from a currently busy memory resource. The requested region
1398 * must either match exactly or fit into a single busy resource entry. In
1399 * the latter case, the remaining resource is adjusted accordingly.
1400 * Existing children of the busy memory resource must be immutable in the
1401 * request.
1402 *
1403 * Note:
1404 * - Additional release conditions, such as overlapping region, can be
1405 * supported after they are confirmed as valid cases.
1406 * - When a busy memory resource gets split into two entries, the code
1407 * assumes that all children remain in the lower address entry for
1408 * simplicity. Enhance this logic when necessary.
1409 */
1410void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
1411{
1412 struct resource *parent = &iomem_resource;
1413 struct resource *new_res = NULL;
1414 bool alloc_nofail = false;
1415 struct resource **p;
1416 struct resource *res;
1417 resource_size_t end;
1418
1419 end = start + size - 1;
1420 if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
1421 return;
1422
1423 /*
1424 * We free up quite a lot of memory on memory hotunplug (esp., memap),
1425 * just before releasing the region. This is highly unlikely to
1426 * fail - let's play save and make it never fail as the caller cannot
1427 * perform any error handling (e.g., trying to re-add memory will fail
1428 * similarly).
1429 */
1430retry:
1431 new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
1432
1433 p = &parent->child;
1434 write_lock(&resource_lock);
1435
1436 while ((res = *p)) {
1437 if (res->start >= end)
1438 break;
1439
1440 /* look for the next resource if it does not fit into */
1441 if (res->start > start || res->end < end) {
1442 p = &res->sibling;
1443 continue;
1444 }
1445
1446 if (!(res->flags & IORESOURCE_MEM))
1447 break;
1448
1449 if (!(res->flags & IORESOURCE_BUSY)) {
1450 p = &res->child;
1451 continue;
1452 }
1453
1454 /* found the target resource; let's adjust accordingly */
1455 if (res->start == start && res->end == end) {
1456 /* free the whole entry */
1457 *p = res->sibling;
1458 free_resource(res);
1459 } else if (res->start == start && res->end != end) {
1460 /* adjust the start */
1461 WARN_ON_ONCE(__adjust_resource(res, end + 1,
1462 res->end - end));
1463 } else if (res->start != start && res->end == end) {
1464 /* adjust the end */
1465 WARN_ON_ONCE(__adjust_resource(res, res->start,
1466 start - res->start));
1467 } else {
1468 /* split into two entries - we need a new resource */
1469 if (!new_res) {
1470 new_res = alloc_resource(GFP_ATOMIC);
1471 if (!new_res) {
1472 alloc_nofail = true;
1473 write_unlock(&resource_lock);
1474 goto retry;
1475 }
1476 }
1477 new_res->name = res->name;
1478 new_res->start = end + 1;
1479 new_res->end = res->end;
1480 new_res->flags = res->flags;
1481 new_res->desc = res->desc;
1482 new_res->parent = res->parent;
1483 new_res->sibling = res->sibling;
1484 new_res->child = NULL;
1485
1486 if (WARN_ON_ONCE(__adjust_resource(res, res->start,
1487 start - res->start)))
1488 break;
1489 res->sibling = new_res;
1490 new_res = NULL;
1491 }
1492
1493 break;
1494 }
1495
1496 write_unlock(&resource_lock);
1497 free_resource(new_res);
1498}
1499#endif /* CONFIG_MEMORY_HOTREMOVE */
1500
1501#ifdef CONFIG_MEMORY_HOTPLUG
1502static bool system_ram_resources_mergeable(struct resource *r1,
1503 struct resource *r2)
1504{
1505 /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
1506 return r1->flags == r2->flags && r1->end + 1 == r2->start &&
1507 r1->name == r2->name && r1->desc == r2->desc &&
1508 !r1->child && !r2->child;
1509}
1510
1511/**
1512 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
1513 * merge it with adjacent, mergeable resources
1514 * @res: resource descriptor
1515 *
1516 * This interface is intended for memory hotplug, whereby lots of contiguous
1517 * system ram resources are added (e.g., via add_memory*()) by a driver, and
1518 * the actual resource boundaries are not of interest (e.g., it might be
1519 * relevant for DIMMs). Only resources that are marked mergeable, that have the
1520 * same parent, and that don't have any children are considered. All mergeable
1521 * resources must be immutable during the request.
1522 *
1523 * Note:
1524 * - The caller has to make sure that no pointers to resources that are
1525 * marked mergeable are used anymore after this call - the resource might
1526 * be freed and the pointer might be stale!
1527 * - release_mem_region_adjustable() will split on demand on memory hotunplug
1528 */
1529void merge_system_ram_resource(struct resource *res)
1530{
1531 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1532 struct resource *cur;
1533
1534 if (WARN_ON_ONCE((res->flags & flags) != flags))
1535 return;
1536
1537 write_lock(&resource_lock);
1538 res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
1539
1540 /* Try to merge with next item in the list. */
1541 cur = res->sibling;
1542 if (cur && system_ram_resources_mergeable(res, cur)) {
1543 res->end = cur->end;
1544 res->sibling = cur->sibling;
1545 free_resource(cur);
1546 }
1547
1548 /* Try to merge with previous item in the list. */
1549 cur = res->parent->child;
1550 while (cur && cur->sibling != res)
1551 cur = cur->sibling;
1552 if (cur && system_ram_resources_mergeable(cur, res)) {
1553 cur->end = res->end;
1554 cur->sibling = res->sibling;
1555 free_resource(res);
1556 }
1557 write_unlock(&resource_lock);
1558}
1559#endif /* CONFIG_MEMORY_HOTPLUG */
1560
1561/*
1562 * Managed region resource
1563 */
1564static void devm_resource_release(struct device *dev, void *ptr)
1565{
1566 struct resource **r = ptr;
1567
1568 release_resource(*r);
1569}
1570
1571/**
1572 * devm_request_resource() - request and reserve an I/O or memory resource
1573 * @dev: device for which to request the resource
1574 * @root: root of the resource tree from which to request the resource
1575 * @new: descriptor of the resource to request
1576 *
1577 * This is a device-managed version of request_resource(). There is usually
1578 * no need to release resources requested by this function explicitly since
1579 * that will be taken care of when the device is unbound from its driver.
1580 * If for some reason the resource needs to be released explicitly, because
1581 * of ordering issues for example, drivers must call devm_release_resource()
1582 * rather than the regular release_resource().
1583 *
1584 * When a conflict is detected between any existing resources and the newly
1585 * requested resource, an error message will be printed.
1586 *
1587 * Returns 0 on success or a negative error code on failure.
1588 */
1589int devm_request_resource(struct device *dev, struct resource *root,
1590 struct resource *new)
1591{
1592 struct resource *conflict, **ptr;
1593
1594 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1595 if (!ptr)
1596 return -ENOMEM;
1597
1598 *ptr = new;
1599
1600 conflict = request_resource_conflict(root, new);
1601 if (conflict) {
1602 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1603 new, conflict->name, conflict);
1604 devres_free(ptr);
1605 return -EBUSY;
1606 }
1607
1608 devres_add(dev, ptr);
1609 return 0;
1610}
1611EXPORT_SYMBOL(devm_request_resource);
1612
1613static int devm_resource_match(struct device *dev, void *res, void *data)
1614{
1615 struct resource **ptr = res;
1616
1617 return *ptr == data;
1618}
1619
1620/**
1621 * devm_release_resource() - release a previously requested resource
1622 * @dev: device for which to release the resource
1623 * @new: descriptor of the resource to release
1624 *
1625 * Releases a resource previously requested using devm_request_resource().
1626 */
1627void devm_release_resource(struct device *dev, struct resource *new)
1628{
1629 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1630 new));
1631}
1632EXPORT_SYMBOL(devm_release_resource);
1633
1634struct region_devres {
1635 struct resource *parent;
1636 resource_size_t start;
1637 resource_size_t n;
1638};
1639
1640static void devm_region_release(struct device *dev, void *res)
1641{
1642 struct region_devres *this = res;
1643
1644 __release_region(this->parent, this->start, this->n);
1645}
1646
1647static int devm_region_match(struct device *dev, void *res, void *match_data)
1648{
1649 struct region_devres *this = res, *match = match_data;
1650
1651 return this->parent == match->parent &&
1652 this->start == match->start && this->n == match->n;
1653}
1654
1655struct resource *
1656__devm_request_region(struct device *dev, struct resource *parent,
1657 resource_size_t start, resource_size_t n, const char *name)
1658{
1659 struct region_devres *dr = NULL;
1660 struct resource *res;
1661
1662 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1663 GFP_KERNEL);
1664 if (!dr)
1665 return NULL;
1666
1667 dr->parent = parent;
1668 dr->start = start;
1669 dr->n = n;
1670
1671 res = __request_region(parent, start, n, name, 0);
1672 if (res)
1673 devres_add(dev, dr);
1674 else
1675 devres_free(dr);
1676
1677 return res;
1678}
1679EXPORT_SYMBOL(__devm_request_region);
1680
1681void __devm_release_region(struct device *dev, struct resource *parent,
1682 resource_size_t start, resource_size_t n)
1683{
1684 struct region_devres match_data = { parent, start, n };
1685
1686 __release_region(parent, start, n);
1687 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1688 &match_data));
1689}
1690EXPORT_SYMBOL(__devm_release_region);
1691
1692/*
1693 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1694 */
1695#define MAXRESERVE 4
1696static int __init reserve_setup(char *str)
1697{
1698 static int reserved;
1699 static struct resource reserve[MAXRESERVE];
1700
1701 for (;;) {
1702 unsigned int io_start, io_num;
1703 int x = reserved;
1704 struct resource *parent;
1705
1706 if (get_option(&str, &io_start) != 2)
1707 break;
1708 if (get_option(&str, &io_num) == 0)
1709 break;
1710 if (x < MAXRESERVE) {
1711 struct resource *res = reserve + x;
1712
1713 /*
1714 * If the region starts below 0x10000, we assume it's
1715 * I/O port space; otherwise assume it's memory.
1716 */
1717 if (io_start < 0x10000) {
1718 res->flags = IORESOURCE_IO;
1719 parent = &ioport_resource;
1720 } else {
1721 res->flags = IORESOURCE_MEM;
1722 parent = &iomem_resource;
1723 }
1724 res->name = "reserved";
1725 res->start = io_start;
1726 res->end = io_start + io_num - 1;
1727 res->flags |= IORESOURCE_BUSY;
1728 res->desc = IORES_DESC_NONE;
1729 res->child = NULL;
1730 if (request_resource(parent, res) == 0)
1731 reserved = x+1;
1732 }
1733 }
1734 return 1;
1735}
1736__setup("reserve=", reserve_setup);
1737
1738/*
1739 * Check if the requested addr and size spans more than any slot in the
1740 * iomem resource tree.
1741 */
1742int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1743{
1744 resource_size_t end = addr + size - 1;
1745 struct resource *p;
1746 int err = 0;
1747
1748 read_lock(&resource_lock);
1749 for_each_resource(&iomem_resource, p, false) {
1750 /*
1751 * We can probably skip the resources without
1752 * IORESOURCE_IO attribute?
1753 */
1754 if (p->start > end)
1755 continue;
1756 if (p->end < addr)
1757 continue;
1758 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1759 PFN_DOWN(p->end) >= PFN_DOWN(end))
1760 continue;
1761 /*
1762 * if a resource is "BUSY", it's not a hardware resource
1763 * but a driver mapping of such a resource; we don't want
1764 * to warn for those; some drivers legitimately map only
1765 * partial hardware resources. (example: vesafb)
1766 */
1767 if (p->flags & IORESOURCE_BUSY)
1768 continue;
1769
1770 pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1771 &addr, &end, p->name, p);
1772 err = -1;
1773 break;
1774 }
1775 read_unlock(&resource_lock);
1776
1777 return err;
1778}
1779
1780#ifdef CONFIG_STRICT_DEVMEM
1781static int strict_iomem_checks = 1;
1782#else
1783static int strict_iomem_checks;
1784#endif
1785
1786/*
1787 * Check if an address is exclusive to the kernel and must not be mapped to
1788 * user space, for example, via /dev/mem.
1789 *
1790 * Returns true if exclusive to the kernel, otherwise returns false.
1791 */
1792bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1793{
1794 const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1795 IORESOURCE_EXCLUSIVE;
1796 bool skip_children = false, err = false;
1797 struct resource *p;
1798
1799 read_lock(&resource_lock);
1800 for_each_resource(root, p, skip_children) {
1801 if (p->start >= addr + size)
1802 break;
1803 if (p->end < addr) {
1804 skip_children = true;
1805 continue;
1806 }
1807 skip_children = false;
1808
1809 /*
1810 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1811 * IORESOURCE_EXCLUSIVE is set, even if they
1812 * are not busy and even if "iomem=relaxed" is set. The
1813 * responsible driver dynamically adds/removes system RAM within
1814 * such an area and uncontrolled access is dangerous.
1815 */
1816 if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1817 err = true;
1818 break;
1819 }
1820
1821 /*
1822 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
1823 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
1824 * resource is busy.
1825 */
1826 if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1827 continue;
1828 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1829 || p->flags & IORESOURCE_EXCLUSIVE) {
1830 err = true;
1831 break;
1832 }
1833 }
1834 read_unlock(&resource_lock);
1835
1836 return err;
1837}
1838
1839bool iomem_is_exclusive(u64 addr)
1840{
1841 return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1842 PAGE_SIZE);
1843}
1844
1845struct resource_entry *resource_list_create_entry(struct resource *res,
1846 size_t extra_size)
1847{
1848 struct resource_entry *entry;
1849
1850 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1851 if (entry) {
1852 INIT_LIST_HEAD(&entry->node);
1853 entry->res = res ? res : &entry->__res;
1854 }
1855
1856 return entry;
1857}
1858EXPORT_SYMBOL(resource_list_create_entry);
1859
1860void resource_list_free(struct list_head *head)
1861{
1862 struct resource_entry *entry, *tmp;
1863
1864 list_for_each_entry_safe(entry, tmp, head, node)
1865 resource_list_destroy_entry(entry);
1866}
1867EXPORT_SYMBOL(resource_list_free);
1868
1869#ifdef CONFIG_GET_FREE_REGION
1870#define GFR_DESCENDING (1UL << 0)
1871#define GFR_REQUEST_REGION (1UL << 1)
1872#ifdef PA_SECTION_SHIFT
1873#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
1874#else
1875#define GFR_DEFAULT_ALIGN PAGE_SIZE
1876#endif
1877
1878static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1879 resource_size_t align, unsigned long flags)
1880{
1881 if (flags & GFR_DESCENDING) {
1882 resource_size_t end;
1883
1884 end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1885 return end - size + 1;
1886 }
1887
1888 return ALIGN(max(base->start, align), align);
1889}

/* Whether @addr is still a valid candidate within @base for this scan */
static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascending case, be careful that the last increment by
	 * @size did not wrap around 0.
	 */
	return addr > addr - size &&
		addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
}

/* Advance to the next candidate address in the scan direction */
static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

/* devm action: unwind a resource handed out by get_free_mem_region() */
static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}
	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert.
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical address space large
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 *
 * Return: the new resource on success, or an ERR_PTR() on failure.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
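
/*
 * Illustrative sketch (not part of this file): a ZONE_DEVICE driver's probe
 * path might carve device-private memory out of iomem_resource and feed the
 * result to memremap_pages(). The pagemap wiring is abbreviated and the
 * variable names are hypothetical:
 *
 *	struct dev_pagemap *pgmap;
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_64M);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	... memremap_pages(pgmap, numa_node_id()) ...
 */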

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, which can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource covering a free range in the span of
 * @base that is not claimed by any descendant of @base.
 *
 * Return: the new resource on success, or an ERR_PTR() on failure.
 */
struct resource *alloc_free_mem_region(struct resource *base,
		unsigned long size, unsigned long align,
		const char *name)
{
	/* Default to an ascending search and a plain resource insertion */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
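
/*
 * Illustrative sketch (not part of this file): a CXL-style caller can
 * allocate a host physical address range beneath a parent window resource
 * and later tear it down with remove_resource() and kfree(). The cxlrd
 * variable and region name are hypothetical:
 *
 *	struct resource *res;
 *
 *	res = alloc_free_mem_region(cxlrd->res, SZ_256M, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */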
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

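/*
 * Illustrative usage (not part of this file): the checks above are driven by
 * the "iomem=" kernel command-line parameter, e.g.:
 *
 *	iomem=relaxed		# disable strict /dev/mem range checks
 *	iomem=strict		# enforce them even without CONFIG_STRICT_DEVMEM
 */
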
static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the now-initialized iomem revocation inode. Pairs with the
	 * smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}
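
/*
 * Illustrative sketch (not part of this file's text here): the acquire side
 * lives in revoke_iomem(), defined earlier in this file. Roughly, it bails
 * out if the inode has not been published yet and otherwise unmaps any
 * existing user-space mappings of the range:
 *
 *	struct inode *inode = smp_load_acquire(&iomem_inode);
 *
 *	if (!inode)
 *		return;
 *	unmap_mapping_range(inode->i_mapping, res->start,
 *			    resource_size(res), 1);
 */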

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);