// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
				  resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p)
{
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static struct resource *next_resource_skip_children(struct resource *p)
{
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children)			\
	for ((_p) = (_root)->child; (_p);				\
	     (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
				       next_resource(_p))
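
/*
 * Illustrative sketch (not part of the original file): walking the whole
 * iomem tree in pre-order with for_each_resource(). resource_lock must be
 * held across the traversal; the printed format is just an example.
 *
 *	struct resource *p;
 *
 *	read_lock(&resource_lock);
 *	for_each_resource(&iomem_resource, p, false)
 *		pr_info("%pR\n", p);
 *	read_unlock(&resource_lock);
 */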

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	return (void *)next_resource(p);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = pde_data(file_inode(m->file));
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
		   depth * 2, "",
		   width, start,
		   width, end,
		   r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			     &ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot,
	 * we'll leak it here: we can only return full pages back to the
	 * buddy allocator, and trying to be smart and reusing them
	 * eventually in alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
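
/*
 * Illustrative sketch (not part of the original file): a driver reserving a
 * fixed MMIO window with request_resource(). The "my-device" name and the
 * address range are made-up example values.
 *
 *	static struct resource my_res = {
 *		.name  = "my-device",
 *		.start = 0xfed40000,
 *		.end   = 0xfed40fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &my_res))
 *		return -EBUSY;	// range already claimed by someone else
 */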

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 * @start: start address of the resource searched for
 * @end: end address of same resource
 * @flags: flags which the resource must have
 * @desc: descriptor the resource must have
 * @res: return ptr, if resource found
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV. Returns -EINVAL for invalid parameters.
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for (p = iomem_resource.child; p; p = next_resource(p)) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap [@start..@end] and also match @flags
 * and @desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
			u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
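
/*
 * Illustrative sketch (not part of the original file): counting ACPI table
 * regions with walk_iomem_res_desc(). The callback and its use of the @arg
 * cookie are made up for the example.
 *
 *	static int count_res(struct resource *res, void *arg)
 *	{
 *		unsigned int *count = arg;
 *
 *		(*count)++;
 *		return 0;	// a non-zero return would stop the walk
 *	}
 *
 *	unsigned int count = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM, 0, -1,
 *			    &count, count_res);
 */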

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
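
/*
 * Illustrative sketch (not part of the original file): summing the System
 * RAM pages in the first 4 GiB via walk_system_ram_range(). The callback
 * name and the use of @arg are made up for the example.
 *
 *	static int add_pages(unsigned long pfn, unsigned long nr_pages, void *arg)
 *	{
 *		unsigned long *total = arg;
 *
 *		*total += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *
 *	walk_system_ram_range(0, SZ_4G >> PAGE_SHIFT, &total, add_pages);
 */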

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	struct resource res;
	int type = 0, other = 0;
	struct resource *p;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (resource_overlaps(p, &res))
			is_type ? type++ : other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
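
/*
 * Illustrative sketch (not part of the original file): refusing to remap a
 * range that overlaps System RAM, the typical region_intersects() pattern.
 * @phys and @size stand in for caller-supplied values.
 *
 *	if (region_intersects(phys, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;	// refuse to map RAM
 */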

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			   struct resource *new,
			   resource_size_t size,
			   struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the
	 * assignment of this->start - 1 to tmp->end below would cause an
	 * underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data,
							 &avail, size,
							 constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * The resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
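
/*
 * Illustrative sketch (not part of the original file): letting the tree pick
 * any free 4 KiB MMIO range below 4 GiB, aligned to 4 KiB. The resource and
 * the bounds are made-up example values.
 *
 *	static struct resource my_res = {
 *		.name  = "my-device",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &my_res, SZ_4K,
 *			      0, 0xffffffff, SZ_4K, NULL, NULL))
 *		return -EBUSY;	// no free slot satisfied the constraints
 */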

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or a pointer to the conflicting resource if the
 * resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
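
/*
 * Illustrative sketch (not part of the original file): firmware code
 * publishing a discovered range; conflicting entries that fit entirely
 * inside it become its children. Example values only.
 *
 *	static struct resource fw_res = {
 *		.name  = "fw-region",
 *		.start = 0xe0000000,
 *		.end   = 0xefffffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_res))
 *		pr_warn("fw-region could not be inserted\n");
 */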

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n",
			new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
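
/*
 * Illustrative sketch (not part of the original file): growing an already
 * inserted region in place, keeping its start. @res points at a resource
 * previously placed in the tree; SZ_8K is just an example size.
 *
 *	if (adjust_resource(res, res->start, SZ_8K))
 *		return -EBUSY;	// neighbours or children are in the way
 */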

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race is
	 * ok because it means drivers are claiming resources before the
	 * fs_initcall level of init, preventing iomem_get_mapping() users
	 * from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);
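
/*
 * Illustrative sketch (not part of the original file): the usual way into
 * __request_region() is via the request_mem_region()/request_region()
 * wrappers from <linux/ioport.h>. Example values only.
 *
 *	if (!request_mem_region(0xfed40000, SZ_4K, "my-device"))
 *		return -EBUSY;
 *	// ... use the region ...
 *	release_mem_region(0xfed40000, SZ_4K);
 */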

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (especially
	 * the memmap) just before releasing the region, so the allocation
	 * here is highly unlikely to fail. Let's play safe and make it never
	 * fail, as the caller cannot perform any error handling (e.g.,
	 * trying to re-add memory will fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into the region */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		/*
		 * All memory regions added from memory-hotplug path have the
		 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
		 * this flag, we know that we are dealing with a resource coming
		 * from HMM/devm. HMM/devm use another mechanism to add/release
		 * a resource. This goes via devm_request_mem_region and
		 * devm_release_mem_region.
		 * HMM/devm take care to release their resources when they want,
		 * so if we are dealing with them, let us just back off here.
		 */
		if (!(res->flags & IORESOURCE_SYSRAM))
			break;

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (whereas they might be
 * relevant for, e.g., DIMMs). Only resources that are marked mergeable, that
 * have the same parent, and that don't have any children are considered.
 * All mergeable resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
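
/*
 * Illustrative sketch (not part of the original file): a platform driver
 * probe() using devm_request_resource(); the region is dropped automatically
 * on unbind. The static resource and its range are made up for the example.
 *
 *	static struct resource my_res = DEFINE_RES_MEM(0xfed40000, SZ_4K);
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		return devm_request_resource(&pdev->dev, &iomem_resource,
 *					     &my_res);
 *	}
 */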

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
	       this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	resource_size_t end = addr + size - 1;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) ||
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
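
/*
 * Illustrative sketch (not part of the original file): building and tearing
 * down a resource_entry list, as bus code does for host bridge windows. The
 * window address is a made-up example value.
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0xc0000000;
 *	entry->res->end   = 0xc0000000 + SZ_256M - 1;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &resources);
 *
 *	resource_list_free(&resources);	// destroys every entry
 */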
1783
1784#ifdef CONFIG_GET_FREE_REGION
1785#define GFR_DESCENDING (1UL << 0)
1786#define GFR_REQUEST_REGION (1UL << 1)
1787#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
1788
1789static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1790 resource_size_t align, unsigned long flags)
1791{
1792 if (flags & GFR_DESCENDING) {
1793 resource_size_t end;
1794
1795 end = min_t(resource_size_t, base->end,
1796 (1ULL << MAX_PHYSMEM_BITS) - 1);
1797 return end - size + 1;
1798 }
1799
1800 return ALIGN(base->start, align);
1801}
1802
1803static bool gfr_continue(struct resource *base, resource_size_t addr,
1804 resource_size_t size, unsigned long flags)
1805{
1806 if (flags & GFR_DESCENDING)
1807 return addr > size && addr >= base->start;
1808 /*
1809 * In the ascend case be careful that the last increment by
1810 * @size did not wrap 0.
1811 */
1812 return addr > addr - size &&
1813 addr <= min_t(resource_size_t, base->end,
1814 (1ULL << MAX_PHYSMEM_BITS) - 1);
1815}
1816
1817static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1818 unsigned long flags)
1819{
1820 if (flags & GFR_DESCENDING)
1821 return addr - size;
1822 return addr + size;
1823}
1824
1825static void remove_free_mem_region(void *_res)
1826{
1827 struct resource *res = _res;
1828
1829 if (res->parent)
1830 remove_resource(res);
1831 free_resource(res);
1832}
1833
1834static struct resource *
1835get_free_mem_region(struct device *dev, struct resource *base,
1836 resource_size_t size, const unsigned long align,
1837 const char *name, const unsigned long desc,
1838 const unsigned long flags)
1839{
1840 resource_size_t addr;
1841 struct resource *res;
1842 struct region_devres *dr = NULL;
1843
1844 size = ALIGN(size, align);
1845
1846 res = alloc_resource(GFP_KERNEL);
1847 if (!res)
1848 return ERR_PTR(-ENOMEM);
1849
1850 if (dev && (flags & GFR_REQUEST_REGION)) {
1851 dr = devres_alloc(devm_region_release,
1852 sizeof(struct region_devres), GFP_KERNEL);
1853 if (!dr) {
1854 free_resource(res);
1855 return ERR_PTR(-ENOMEM);
1856 }
1857 } else if (dev) {
1858 if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
1859 return ERR_PTR(-ENOMEM);
1860 }
1861
1862 write_lock(&resource_lock);
1863 for (addr = gfr_start(base, size, align, flags);
1864 gfr_continue(base, addr, size, flags);
1865 addr = gfr_next(addr, size, flags)) {
1866 if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
1867 REGION_DISJOINT)
1868 continue;
1869
1870 if (flags & GFR_REQUEST_REGION) {
1871 if (__request_region_locked(res, &iomem_resource, addr,
1872 size, name, 0))
1873 break;
1874
1875 if (dev) {
1876 dr->parent = &iomem_resource;
1877 dr->start = addr;
1878 dr->n = size;
1879 devres_add(dev, dr);
1880 }
1881
1882 res->desc = desc;
1883 write_unlock(&resource_lock);
1884
1885
1886 /*
1887 * A driver is claiming this region so revoke any
1888 * mappings.
1889 */
1890 revoke_iomem(res);
1891 } else {
1892 res->start = addr;
1893 res->end = addr + size - 1;
1894 res->name = name;
1895 res->desc = desc;
1896 res->flags = IORESOURCE_MEM;
1897
1898 /*
1899 * Only succeed if the resource hosts an exclusive
1900 * range after the insert
1901 */
1902 if (__insert_resource(base, res) || res->child)
1903 break;
1904
1905 write_unlock(&resource_lock);
1906 }
1907
1908 return res;
1909 }
1910 write_unlock(&resource_lock);
1911
1912 if (flags & GFR_REQUEST_REGION) {
1913 free_resource(res);
1914 devres_free(dr);
1915 } else if (dev)
1916 devm_release_action(dev, remove_free_mem_region, res);
1917
1918 return ERR_PTR(-ERANGE);
1919}
1920
1921/**
1922 * devm_request_free_mem_region - find free region for device private memory
1923 *
1924 * @dev: device struct to bind the resource to
1925 * @size: size in bytes of the device memory to add
1926 * @base: resource tree to look in
1927 *
1928 * This function tries to find an empty range of physical address big enough to
1929 * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
1930 * memory, which in turn allocates struct pages.
1931 */
1932struct resource *devm_request_free_mem_region(struct device *dev,
1933 struct resource *base, unsigned long size)
1934{
1935 unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
1936
1937 return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
1938 dev_name(dev),
1939 IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
1940}
1941EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
1942
1943struct resource *request_free_mem_region(struct resource *base,
1944 unsigned long size, const char *name)
1945{
1946 unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
1947
1948 return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
1949 IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
1950}
1951EXPORT_SYMBOL_GPL(request_free_mem_region);
1952
1953/**
1954 * alloc_free_mem_region - find a free region relative to @base
1955 * @base: resource that will parent the new resource
1956 * @size: size in bytes of memory to allocate from @base
1957 * @align: alignment requirements for the allocation
1958 * @name: resource name
1959 *
1960 * Buses like CXL, that can dynamically instantiate new memory regions,
1961 * need a method to allocate physical address space for those regions.
1962 * Allocate and insert a new resource to cover a free, unclaimed by a
1963 * descendant of @base, range in the span of @base.
1964 */
struct resource *alloc_free_mem_region(struct resource *base,
		unsigned long size, unsigned long align,
		const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
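
/*
 * Illustrative sketch (hypothetical CXL-style caller): carve address space
 * for a new memory region out of a host bridge window resource:
 *
 *	res = alloc_free_mem_region(hb_window, size, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * Unlike the request_free_mem_region() variants above, the new resource is
 * inserted but not marked busy; a caller tears it down with
 * remove_resource() followed by kfree() of the resource.
 */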
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}
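
/*
 * Illustrative usage: the parser above matches substrings, so either of
 * these kernel command line options flips the checks (the default depends
 * on CONFIG_STRICT_DEVMEM):
 *
 *	iomem=relaxed
 *	iomem=strict
 */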

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the iomem revocation inode now that it is initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/resource.c
4 *
5 * Copyright (C) 1999 Linus Torvalds
6 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
7 *
8 * Arbitrary resource management.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/export.h>
14#include <linux/errno.h>
15#include <linux/ioport.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <linux/proc_fs.h>
21#include <linux/pseudo_fs.h>
22#include <linux/sched.h>
23#include <linux/seq_file.h>
24#include <linux/device.h>
25#include <linux/pfn.h>
26#include <linux/mm.h>
27#include <linux/mount.h>
28#include <linux/resource_ext.h>
29#include <uapi/linux/magic.h>
30#include <linux/string.h>
31#include <linux/vmalloc.h>
32#include <asm/io.h>
33
34
35struct resource ioport_resource = {
36 .name = "PCI IO",
37 .start = 0,
38 .end = IO_SPACE_LIMIT,
39 .flags = IORESOURCE_IO,
40};
41EXPORT_SYMBOL(ioport_resource);
42
43struct resource iomem_resource = {
44 .name = "PCI mem",
45 .start = 0,
46 .end = -1,
47 .flags = IORESOURCE_MEM,
48};
49EXPORT_SYMBOL(iomem_resource);
50
51static DEFINE_RWLOCK(resource_lock);
52
53/*
54 * Return the next node of @p in pre-order tree traversal. If
55 * @skip_children is true, skip the descendant nodes of @p in
56 * traversal. If @p is a descendant of @subtree_root, only traverse
57 * the subtree under @subtree_root.
58 */
59static struct resource *next_resource(struct resource *p, bool skip_children,
60 struct resource *subtree_root)
61{
62 if (!skip_children && p->child)
63 return p->child;
64 while (!p->sibling && p->parent) {
65 p = p->parent;
66 if (p == subtree_root)
67 return NULL;
68 }
69 return p->sibling;
70}
71
72/*
73 * Traverse the resource subtree under @_root in pre-order, excluding
74 * @_root itself.
75 *
76 * NOTE: '__p' is introduced to avoid shadowing '_p' outside of loop.
77 * And it is referenced to avoid unused variable warning.
78 */
79#define for_each_resource(_root, _p, _skip_children) \
80 for (typeof(_root) __root = (_root), __p = _p = __root->child; \
81 __p && _p; _p = next_resource(_p, _skip_children, __root))
82
83#ifdef CONFIG_PROC_FS
84
85enum { MAX_IORES_LEVEL = 5 };
86
87static void *r_start(struct seq_file *m, loff_t *pos)
88 __acquires(resource_lock)
89{
90 struct resource *root = pde_data(file_inode(m->file));
91 struct resource *p;
92 loff_t l = *pos;
93
94 read_lock(&resource_lock);
95 for_each_resource(root, p, false) {
96 if (l-- == 0)
97 break;
98 }
99
100 return p;
101}
102
103static void *r_next(struct seq_file *m, void *v, loff_t *pos)
104{
105 struct resource *p = v;
106
107 (*pos)++;
108
109 return (void *)next_resource(p, false, NULL);
110}
111
112static void r_stop(struct seq_file *m, void *v)
113 __releases(resource_lock)
114{
115 read_unlock(&resource_lock);
116}
117
118static int r_show(struct seq_file *m, void *v)
119{
120 struct resource *root = pde_data(file_inode(m->file));
121 struct resource *r = v, *p;
122 unsigned long long start, end;
123 int width = root->end < 0x10000 ? 4 : 8;
124 int depth;
125
126 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
127 if (p->parent == root)
128 break;
129
130 if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
131 start = r->start;
132 end = r->end;
133 } else {
134 start = end = 0;
135 }
136
137 seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
138 depth * 2, "",
139 width, start,
140 width, end,
141 r->name ? r->name : "<BAD>");
142 return 0;
143}
144
145static const struct seq_operations resource_op = {
146 .start = r_start,
147 .next = r_next,
148 .stop = r_stop,
149 .show = r_show,
150};
151
152static int __init ioresources_init(void)
153{
154 proc_create_seq_data("ioports", 0, NULL, &resource_op,
155 &ioport_resource);
156 proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
157 return 0;
158}
159__initcall(ioresources_init);
160
161#endif /* CONFIG_PROC_FS */
162
163static void free_resource(struct resource *res)
164{
165 /**
166 * If the resource was allocated using memblock early during boot
167 * we'll leak it here: we can only return full pages back to the
168 * buddy and trying to be smart and reusing them eventually in
169 * alloc_resource() overcomplicates resource handling.
170 */
171 if (res && PageSlab(virt_to_head_page(res)))
172 kfree(res);
173}
174
175static struct resource *alloc_resource(gfp_t flags)
176{
177 return kzalloc(sizeof(struct resource), flags);
178}
179
180/* Return the conflict entry if you can't request it */
181static struct resource * __request_resource(struct resource *root, struct resource *new)
182{
183 resource_size_t start = new->start;
184 resource_size_t end = new->end;
185 struct resource *tmp, **p;
186
187 if (end < start)
188 return root;
189 if (start < root->start)
190 return root;
191 if (end > root->end)
192 return root;
193 p = &root->child;
194 for (;;) {
195 tmp = *p;
196 if (!tmp || tmp->start > end) {
197 new->sibling = tmp;
198 *p = new;
199 new->parent = root;
200 return NULL;
201 }
202 p = &tmp->sibling;
203 if (tmp->end < start)
204 continue;
205 return tmp;
206 }
207}
208
209static int __release_resource(struct resource *old, bool release_child)
210{
211 struct resource *tmp, **p, *chd;
212
213 p = &old->parent->child;
214 for (;;) {
215 tmp = *p;
216 if (!tmp)
217 break;
218 if (tmp == old) {
219 if (release_child || !(tmp->child)) {
220 *p = tmp->sibling;
221 } else {
222 for (chd = tmp->child;; chd = chd->sibling) {
223 chd->parent = tmp->parent;
224 if (!(chd->sibling))
225 break;
226 }
227 *p = tmp->child;
228 chd->sibling = tmp->sibling;
229 }
230 old->parent = NULL;
231 return 0;
232 }
233 p = &tmp->sibling;
234 }
235 return -EINVAL;
236}
237
238static void __release_child_resources(struct resource *r)
239{
240 struct resource *tmp, *p;
241 resource_size_t size;
242
243 p = r->child;
244 r->child = NULL;
245 while (p) {
246 tmp = p;
247 p = p->sibling;
248
249 tmp->parent = NULL;
250 tmp->sibling = NULL;
251 __release_child_resources(tmp);
252
253 printk(KERN_DEBUG "release child resource %pR\n", tmp);
254 /* need to restore size, and keep flags */
255 size = resource_size(tmp);
256 tmp->start = 0;
257 tmp->end = size - 1;
258 }
259}
260
261void release_child_resources(struct resource *r)
262{
263 write_lock(&resource_lock);
264 __release_child_resources(r);
265 write_unlock(&resource_lock);
266}
267
268/**
269 * request_resource_conflict - request and reserve an I/O or memory resource
270 * @root: root resource descriptor
271 * @new: resource descriptor desired by caller
272 *
273 * Returns 0 for success, conflict resource on error.
274 */
275struct resource *request_resource_conflict(struct resource *root, struct resource *new)
276{
277 struct resource *conflict;
278
279 write_lock(&resource_lock);
280 conflict = __request_resource(root, new);
281 write_unlock(&resource_lock);
282 return conflict;
283}
284
285/**
286 * request_resource - request and reserve an I/O or memory resource
287 * @root: root resource descriptor
288 * @new: resource descriptor desired by caller
289 *
290 * Returns 0 for success, negative error code on error.
291 */
292int request_resource(struct resource *root, struct resource *new)
293{
294 struct resource *conflict;
295
296 conflict = request_resource_conflict(root, new);
297 return conflict ? -EBUSY : 0;
298}
299
300EXPORT_SYMBOL(request_resource);
301
302/**
303 * release_resource - release a previously reserved resource
304 * @old: resource pointer
305 */
306int release_resource(struct resource *old)
307{
308 int retval;
309
310 write_lock(&resource_lock);
311 retval = __release_resource(old, true);
312 write_unlock(&resource_lock);
313 return retval;
314}
315
316EXPORT_SYMBOL(release_resource);
317
318static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
319{
320 return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
321}
322
323/**
324 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
325 * [@start..@end].
326 *
327 * If a resource is found, returns 0 and @*res is overwritten with the part
328 * of the resource that's within [@start..@end]; if none is found, returns
329 * -ENODEV. Returns -EINVAL for invalid parameters.
330 *
331 * @start: start address of the resource searched for
332 * @end: end address of same resource
333 * @flags: flags which the resource must have
334 * @desc: descriptor the resource must have
335 * @res: return ptr, if resource found
336 *
337 * The caller must specify @start, @end, @flags, and @desc
338 * (which may be IORES_DESC_NONE).
339 */
340static int find_next_iomem_res(resource_size_t start, resource_size_t end,
341 unsigned long flags, unsigned long desc,
342 struct resource *res)
343{
344 struct resource *p;
345
346 if (!res)
347 return -EINVAL;
348
349 if (start >= end)
350 return -EINVAL;
351
352 read_lock(&resource_lock);
353
354 for_each_resource(&iomem_resource, p, false) {
355 /* If we passed the resource we are looking for, stop */
356 if (p->start > end) {
357 p = NULL;
358 break;
359 }
360
361 /* Skip until we find a range that matches what we look for */
362 if (p->end < start)
363 continue;
364
365 /* Found a match, break */
366 if (is_type_match(p, flags, desc))
367 break;
368 }
369
370 if (p) {
371 /* copy data */
372 *res = (struct resource) {
373 .start = max(start, p->start),
374 .end = min(end, p->end),
375 .flags = p->flags,
376 .desc = p->desc,
377 .parent = p->parent,
378 };
379 }
380
381 read_unlock(&resource_lock);
382 return p ? 0 : -ENODEV;
383}
384
385static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
386 unsigned long flags, unsigned long desc,
387 void *arg,
388 int (*func)(struct resource *, void *))
389{
390 struct resource res;
391 int ret = -EINVAL;
392
393 while (start < end &&
394 !find_next_iomem_res(start, end, flags, desc, &res)) {
395 ret = (*func)(&res, arg);
396 if (ret)
397 break;
398
399 start = res.end + 1;
400 }
401
402 return ret;
403}
404
405/**
406 * walk_iomem_res_desc - Walks through iomem resources and calls func()
407 * with matching resource ranges.
408 * *
409 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
410 * @flags: I/O resource flags
411 * @start: start addr
412 * @end: end addr
413 * @arg: function argument for the callback @func
414 * @func: callback function that is called for each qualifying resource area
415 *
416 * All the memory ranges which overlap start,end and also match flags and
417 * desc are valid candidates.
418 *
419 * NOTE: For a new descriptor search, define a new IORES_DESC in
420 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
421 */
422int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
423 u64 end, void *arg, int (*func)(struct resource *, void *))
424{
425 return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
426}
427EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
428
429/*
430 * This function calls the @func callback against all memory ranges of type
431 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY.
432 * Now, this function is only for System RAM, it deals with full ranges and
433 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
434 * ranges.
435 */
436int walk_system_ram_res(u64 start, u64 end, void *arg,
437 int (*func)(struct resource *, void *))
438{
439 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
440
441 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
442 func);
443}
444
445/*
446 * This function, being a variant of walk_system_ram_res(), calls the @func
447 * callback against all memory ranges of type System RAM which are marked as
448 * IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY in reversed order, i.e., from
449 * higher to lower.
450 */
451int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
452 int (*func)(struct resource *, void *))
453{
454 struct resource res, *rams;
455 int rams_size = 16, i;
456 unsigned long flags;
457 int ret = -1;
458
459 /* create a list */
460 rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
461 if (!rams)
462 return ret;
463
464 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
465 i = 0;
466 while ((start < end) &&
467 (!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
468 if (i >= rams_size) {
469 /* re-alloc */
470 struct resource *rams_new;
471
472 rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
473 GFP_KERNEL);
474 if (!rams_new)
475 goto out;
476
477 rams = rams_new;
478 rams_size += 16;
479 }
480
481 rams[i++] = res;
482 start = res.end + 1;
483 }
484
485 /* go reverse */
486 for (i--; i >= 0; i--) {
487 ret = (*func)(&rams[i], arg);
488 if (ret)
489 break;
490 }
491
492out:
493 kvfree(rams);
494 return ret;
495}
496
497/*
498 * This function calls the @func callback against all memory ranges, which
499 * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
500 */
501int walk_mem_res(u64 start, u64 end, void *arg,
502 int (*func)(struct resource *, void *))
503{
504 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
505
506 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
507 func);
508}
509
510/*
511 * This function calls the @func callback against all memory ranges of type
512 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY.
513 * It is to be used only for System RAM.
514 */
515int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
516 void *arg, int (*func)(unsigned long, unsigned long, void *))
517{
518 resource_size_t start, end;
519 unsigned long flags;
520 struct resource res;
521 unsigned long pfn, end_pfn;
522 int ret = -EINVAL;
523
524 start = (u64) start_pfn << PAGE_SHIFT;
525 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
526 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
527 while (start < end &&
528 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
529 pfn = PFN_UP(res.start);
530 end_pfn = PFN_DOWN(res.end + 1);
531 if (end_pfn > pfn)
532 ret = (*func)(pfn, end_pfn - pfn, arg);
533 if (ret)
534 break;
535 start = res.end + 1;
536 }
537 return ret;
538}
539
540static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
541{
542 return 1;
543}
544
545/*
546 * This generic page_is_ram() returns true if specified address is
547 * registered as System RAM in iomem_resource list.
548 */
549int __weak page_is_ram(unsigned long pfn)
550{
551 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
552}
553EXPORT_SYMBOL_GPL(page_is_ram);
554
555static int __region_intersects(struct resource *parent, resource_size_t start,
556 size_t size, unsigned long flags,
557 unsigned long desc)
558{
559 int type = 0; int other = 0;
560 struct resource *p, *dp;
561 struct resource res, o;
562 bool covered;
563
564 res.start = start;
565 res.end = start + size - 1;
566
567 for (p = parent->child; p ; p = p->sibling) {
568 if (!resource_intersection(p, &res, &o))
569 continue;
570 if (is_type_match(p, flags, desc)) {
571 type++;
572 continue;
573 }
574 /*
575 * Continue to search in descendant resources as if the
576 * matched descendant resources cover some ranges of 'p'.
577 *
578 * |------------- "CXL Window 0" ------------|
579 * |-- "System RAM" --|
580 *
581 * will behave similar as the following fake resource
582 * tree when searching "System RAM".
583 *
584 * |-- "System RAM" --||-- "CXL Window 0a" --|
585 */
586 covered = false;
587 for_each_resource(p, dp, false) {
588 if (!resource_overlaps(dp, &res))
589 continue;
590 if (is_type_match(dp, flags, desc)) {
591 type++;
592 /*
593 * Range from 'o.start' to 'dp->start'
594 * isn't covered by matched resource.
595 */
596 if (dp->start > o.start)
597 break;
598 if (dp->end >= o.end) {
599 covered = true;
600 break;
601 }
602 /* Remove covered range */
603 o.start = max(o.start, dp->end + 1);
604 }
605 }
606 if (!covered)
607 other++;
608 }
609
610 if (type == 0)
611 return REGION_DISJOINT;
612
613 if (other == 0)
614 return REGION_INTERSECTS;
615
616 return REGION_MIXED;
617}
618
619/**
620 * region_intersects() - determine intersection of region with known resources
621 * @start: region start address
622 * @size: size of region
623 * @flags: flags of resource (in iomem_resource)
624 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
625 *
626 * Check if the specified region partially overlaps or fully eclipses a
627 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
628 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
629 * return REGION_MIXED if the region overlaps @flags/@desc and another
630 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
631 * and no other defined resource. Note that REGION_INTERSECTS is also
632 * returned in the case when the specified region overlaps RAM and undefined
633 * memory holes.
634 *
635 * region_intersect() is used by memory remapping functions to ensure
636 * the user is not remapping RAM and is a vast speed up over walking
637 * through the resource table page by page.
638 */
639int region_intersects(resource_size_t start, size_t size, unsigned long flags,
640 unsigned long desc)
641{
642 int ret;
643
644 read_lock(&resource_lock);
645 ret = __region_intersects(&iomem_resource, start, size, flags, desc);
646 read_unlock(&resource_lock);
647
648 return ret;
649}
650EXPORT_SYMBOL_GPL(region_intersects);
651
652void __weak arch_remove_reservations(struct resource *avail)
653{
654}
655
656static void resource_clip(struct resource *res, resource_size_t min,
657 resource_size_t max)
658{
659 if (res->start < min)
660 res->start = min;
661 if (res->end > max)
662 res->end = max;
663}
664
665/*
666 * Find empty space in the resource tree with the given range and
667 * alignment constraints
668 */
669static int __find_resource_space(struct resource *root, struct resource *old,
670 struct resource *new, resource_size_t size,
671 struct resource_constraint *constraint)
672{
673 struct resource *this = root->child;
674 struct resource tmp = *new, avail, alloc;
675 resource_alignf alignf = constraint->alignf;
676
677 tmp.start = root->start;
678 /*
679 * Skip past an allocated resource that starts at 0, since the assignment
680 * of this->start - 1 to tmp->end below would cause an underflow.
681 */
682 if (this && this->start == root->start) {
683 tmp.start = (this == old) ? old->start : this->end + 1;
684 this = this->sibling;
685 }
686 for(;;) {
687 if (this)
688 tmp.end = (this == old) ? this->end : this->start - 1;
689 else
690 tmp.end = root->end;
691
692 if (tmp.end < tmp.start)
693 goto next;
694
695 resource_clip(&tmp, constraint->min, constraint->max);
696 arch_remove_reservations(&tmp);
697
698 /* Check for overflow after ALIGN() */
699 avail.start = ALIGN(tmp.start, constraint->align);
700 avail.end = tmp.end;
701 avail.flags = new->flags & ~IORESOURCE_UNSET;
702 if (avail.start >= tmp.start) {
703 alloc.flags = avail.flags;
704 if (alignf) {
705 alloc.start = alignf(constraint->alignf_data,
706 &avail, size, constraint->align);
707 } else {
708 alloc.start = avail.start;
709 }
710 alloc.end = alloc.start + size - 1;
711 if (alloc.start <= alloc.end &&
712 resource_contains(&avail, &alloc)) {
713 new->start = alloc.start;
714 new->end = alloc.end;
715 return 0;
716 }
717 }
718
719next: if (!this || this->end == root->end)
720 break;
721
722 if (this != old)
723 tmp.start = this->end + 1;
724 this = this->sibling;
725 }
726 return -EBUSY;
727}
728
729/**
730 * find_resource_space - Find empty space in the resource tree
731 * @root: Root resource descriptor
732 * @new: Resource descriptor awaiting an empty resource space
733 * @size: The minimum size of the empty space
734 * @constraint: The range and alignment constraints to be met
735 *
736 * Finds an empty space under @root in the resource tree satisfying range and
737 * alignment @constraints.
738 *
739 * Return:
740 * * %0 - if successful, @new members start, end, and flags are altered.
741 * * %-EBUSY - if no empty space was found.
742 */
743int find_resource_space(struct resource *root, struct resource *new,
744 resource_size_t size,
745 struct resource_constraint *constraint)
746{
747 return __find_resource_space(root, NULL, new, size, constraint);
748}
749EXPORT_SYMBOL_GPL(find_resource_space);
750
751/**
752 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
753 * The resource will be relocated if the new size cannot be reallocated in the
754 * current location.
755 *
756 * @root: root resource descriptor
757 * @old: resource descriptor desired by caller
758 * @newsize: new size of the resource descriptor
759 * @constraint: the memory range and alignment constraints to be met.
760 */
761static int reallocate_resource(struct resource *root, struct resource *old,
762 resource_size_t newsize,
763 struct resource_constraint *constraint)
764{
765 int err=0;
766 struct resource new = *old;
767 struct resource *conflict;
768
769 write_lock(&resource_lock);
770
771 if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
772 goto out;
773
774 if (resource_contains(&new, old)) {
775 old->start = new.start;
776 old->end = new.end;
777 goto out;
778 }
779
780 if (old->child) {
781 err = -EBUSY;
782 goto out;
783 }
784
785 if (resource_contains(old, &new)) {
786 old->start = new.start;
787 old->end = new.end;
788 } else {
789 __release_resource(old, true);
790 *old = new;
791 conflict = __request_resource(root, old);
792 BUG_ON(conflict);
793 }
794out:
795 write_unlock(&resource_lock);
796 return err;
797}
798
799
800/**
801 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
802 * The resource will be reallocated with a new size if it was already allocated
803 * @root: root resource descriptor
804 * @new: resource descriptor desired by caller
805 * @size: requested resource region size
806 * @min: minimum boundary to allocate
807 * @max: maximum boundary to allocate
808 * @align: alignment requested, in bytes
809 * @alignf: alignment function, optional, called if not NULL
810 * @alignf_data: arbitrary data to pass to the @alignf function
811 */
812int allocate_resource(struct resource *root, struct resource *new,
813 resource_size_t size, resource_size_t min,
814 resource_size_t max, resource_size_t align,
815 resource_alignf alignf,
816 void *alignf_data)
817{
818 int err;
819 struct resource_constraint constraint;
820
821 constraint.min = min;
822 constraint.max = max;
823 constraint.align = align;
824 constraint.alignf = alignf;
825 constraint.alignf_data = alignf_data;
826
827 if ( new->parent ) {
828 /* resource is already allocated, try reallocating with
829 the new constraints */
830 return reallocate_resource(root, new, size, &constraint);
831 }
832
833 write_lock(&resource_lock);
834 err = find_resource_space(root, new, size, &constraint);
835 if (err >= 0 && __request_resource(root, new))
836 err = -EBUSY;
837 write_unlock(&resource_lock);
838 return err;
839}
840
841EXPORT_SYMBOL(allocate_resource);
842
843/**
844 * lookup_resource - find an existing resource by a resource start address
845 * @root: root resource descriptor
846 * @start: resource start address
847 *
848 * Returns a pointer to the resource if found, NULL otherwise
849 */
850struct resource *lookup_resource(struct resource *root, resource_size_t start)
851{
852 struct resource *res;
853
854 read_lock(&resource_lock);
855 for (res = root->child; res; res = res->sibling) {
856 if (res->start == start)
857 break;
858 }
859 read_unlock(&resource_lock);
860
861 return res;
862}
863
864/*
865 * Insert a resource into the resource tree. If successful, return NULL,
866 * otherwise return the conflicting resource (compare to __request_resource())
867 */
868static struct resource * __insert_resource(struct resource *parent, struct resource *new)
869{
870 struct resource *first, *next;
871
872 for (;; parent = first) {
873 first = __request_resource(parent, new);
874 if (!first)
875 return first;
876
877 if (first == parent)
878 return first;
879 if (WARN_ON(first == new)) /* duplicated insertion */
880 return first;
881
882 if ((first->start > new->start) || (first->end < new->end))
883 break;
884 if ((first->start == new->start) && (first->end == new->end))
885 break;
886 }
887
888 for (next = first; ; next = next->sibling) {
889 /* Partial overlap? Bad, and unfixable */
890 if (next->start < new->start || next->end > new->end)
891 return next;
892 if (!next->sibling)
893 break;
894 if (next->sibling->start > new->end)
895 break;
896 }
897
898 new->parent = parent;
899 new->sibling = next->sibling;
900 new->child = first;
901
902 next->sibling = NULL;
903 for (next = first; next; next = next->sibling)
904 next->parent = new;
905
906 if (parent->child == first) {
907 parent->child = new;
908 } else {
909 next = parent->child;
910 while (next->sibling != first)
911 next = next->sibling;
912 next->sibling = new;
913 }
914 return NULL;
915}
916
917/**
918 * insert_resource_conflict - Inserts resource in the resource tree
919 * @parent: parent of the new resource
920 * @new: new resource to insert
921 *
922 * Returns 0 on success, conflict resource if the resource can't be inserted.
923 *
924 * This function is equivalent to request_resource_conflict when no conflict
925 * happens. If a conflict happens, and the conflicting resources
926 * entirely fit within the range of the new resource, then the new
927 * resource is inserted and the conflicting resources become children of
928 * the new resource.
929 *
930 * This function is intended for producers of resources, such as FW modules
931 * and bus drivers.
932 */
933struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
934{
935 struct resource *conflict;
936
937 write_lock(&resource_lock);
938 conflict = __insert_resource(parent, new);
939 write_unlock(&resource_lock);
940 return conflict;
941}
942
943/**
944 * insert_resource - Inserts a resource in the resource tree
945 * @parent: parent of the new resource
946 * @new: new resource to insert
947 *
948 * Returns 0 on success, -EBUSY if the resource can't be inserted.
949 *
950 * This function is intended for producers of resources, such as FW modules
951 * and bus drivers.
952 */
953int insert_resource(struct resource *parent, struct resource *new)
954{
955 struct resource *conflict;
956
957 conflict = insert_resource_conflict(parent, new);
958 return conflict ? -EBUSY : 0;
959}
960EXPORT_SYMBOL_GPL(insert_resource);
961
962/**
963 * insert_resource_expand_to_fit - Insert a resource into the resource tree
964 * @root: root resource descriptor
965 * @new: new resource to insert
966 *
967 * Insert a resource into the resource tree, possibly expanding it in order
968 * to make it encompass any conflicting resources.
969 */
970void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
971{
972 if (new->parent)
973 return;
974
975 write_lock(&resource_lock);
976 for (;;) {
977 struct resource *conflict;
978
979 conflict = __insert_resource(root, new);
980 if (!conflict)
981 break;
982 if (conflict == root)
983 break;
984
985 /* Ok, expand resource to cover the conflict, then try again .. */
986 if (conflict->start < new->start)
987 new->start = conflict->start;
988 if (conflict->end > new->end)
989 new->end = conflict->end;
990
991 pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
992 }
993 write_unlock(&resource_lock);
994}
995/*
996 * Not for general consumption, only early boot memory map parsing, PCI
997 * resource discovery, and late discovery of CXL resources are expected
998 * to use this interface. The former are built-in and only the latter,
999 * CXL, is a module.
1000 */
1001EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");
1002
1003/**
1004 * remove_resource - Remove a resource in the resource tree
1005 * @old: resource to remove
1006 *
1007 * Returns 0 on success, -EINVAL if the resource is not valid.
1008 *
1009 * This function removes a resource previously inserted by insert_resource()
1010 * or insert_resource_conflict(), and moves the children (if any) up to
1011 * where they were before. insert_resource() and insert_resource_conflict()
1012 * insert a new resource, and move any conflicting resources down to the
1013 * children of the new resource.
1014 *
1015 * insert_resource(), insert_resource_conflict() and remove_resource() are
1016 * intended for producers of resources, such as FW modules and bus drivers.
1017 */
1018int remove_resource(struct resource *old)
1019{
1020 int retval;
1021
1022 write_lock(&resource_lock);
1023 retval = __release_resource(old, false);
1024 write_unlock(&resource_lock);
1025 return retval;
1026}
1027EXPORT_SYMBOL_GPL(remove_resource);
1028
1029static int __adjust_resource(struct resource *res, resource_size_t start,
1030 resource_size_t size)
1031{
1032 struct resource *tmp, *parent = res->parent;
1033 resource_size_t end = start + size - 1;
1034 int result = -EBUSY;
1035
1036 if (!parent)
1037 goto skip;
1038
1039 if ((start < parent->start) || (end > parent->end))
1040 goto out;
1041
1042 if (res->sibling && (res->sibling->start <= end))
1043 goto out;
1044
1045 tmp = parent->child;
1046 if (tmp != res) {
1047 while (tmp->sibling != res)
1048 tmp = tmp->sibling;
1049 if (start <= tmp->end)
1050 goto out;
1051 }
1052
1053skip:
1054 for (tmp = res->child; tmp; tmp = tmp->sibling)
1055 if ((tmp->start < start) || (tmp->end > end))
1056 goto out;
1057
1058 res->start = start;
1059 res->end = end;
1060 result = 0;
1061
1062 out:
1063 return result;
1064}
1065
1066/**
1067 * adjust_resource - modify a resource's start and size
1068 * @res: resource to modify
1069 * @start: new start value
1070 * @size: new size
1071 *
1072 * Given an existing resource, change its start and size to match the
1073 * arguments. Returns 0 on success, -EBUSY if it can't fit.
1074 * Existing children of the resource are assumed to be immutable.
1075 */
1076int adjust_resource(struct resource *res, resource_size_t start,
1077 resource_size_t size)
1078{
1079 int result;
1080
1081 write_lock(&resource_lock);
1082 result = __adjust_resource(res, start, size);
1083 write_unlock(&resource_lock);
1084 return result;
1085}
1086EXPORT_SYMBOL(adjust_resource);
1087
1088static void __init
1089__reserve_region_with_split(struct resource *root, resource_size_t start,
1090 resource_size_t end, const char *name)
1091{
1092 struct resource *parent = root;
1093 struct resource *conflict;
1094 struct resource *res = alloc_resource(GFP_ATOMIC);
1095 struct resource *next_res = NULL;
1096 int type = resource_type(root);
1097
1098 if (!res)
1099 return;
1100
1101 res->name = name;
1102 res->start = start;
1103 res->end = end;
1104 res->flags = type | IORESOURCE_BUSY;
1105 res->desc = IORES_DESC_NONE;
1106
1107 while (1) {
1108
1109 conflict = __request_resource(parent, res);
1110 if (!conflict) {
1111 if (!next_res)
1112 break;
1113 res = next_res;
1114 next_res = NULL;
1115 continue;
1116 }
1117
1118 /* conflict covered whole area */
1119 if (conflict->start <= res->start &&
1120 conflict->end >= res->end) {
1121 free_resource(res);
1122 WARN_ON(next_res);
1123 break;
1124 }
1125
1126 /* failed, split and try again */
1127 if (conflict->start > res->start) {
1128 end = res->end;
1129 res->end = conflict->start - 1;
1130 if (conflict->end < end) {
1131 next_res = alloc_resource(GFP_ATOMIC);
1132 if (!next_res) {
1133 free_resource(res);
1134 break;
1135 }
1136 next_res->name = name;
1137 next_res->start = conflict->end + 1;
1138 next_res->end = end;
1139 next_res->flags = type | IORESOURCE_BUSY;
1140 next_res->desc = IORES_DESC_NONE;
1141 }
1142 } else {
1143 res->start = conflict->end + 1;
1144 }
1145 }
1146
1147}
1148
1149void __init
1150reserve_region_with_split(struct resource *root, resource_size_t start,
1151 resource_size_t end, const char *name)
1152{
1153 int abort = 0;
1154
1155 write_lock(&resource_lock);
1156 if (root->start > start || root->end < end) {
1157 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1158 (unsigned long long)start, (unsigned long long)end,
1159 root);
1160 if (start > root->end || end < root->start)
1161 abort = 1;
1162 else {
1163 if (end > root->end)
1164 end = root->end;
1165 if (start < root->start)
1166 start = root->start;
1167 pr_err("fixing request to [0x%llx-0x%llx]\n",
1168 (unsigned long long)start,
1169 (unsigned long long)end);
1170 }
1171 dump_stack();
1172 }
1173 if (!abort)
1174 __reserve_region_with_split(root, start, end, name);
1175 write_unlock(&resource_lock);
1176}
1177
1178/**
1179 * resource_alignment - calculate resource's alignment
1180 * @res: resource pointer
1181 *
1182 * Returns alignment on success, 0 (invalid alignment) on failure.
1183 */
1184resource_size_t resource_alignment(struct resource *res)
1185{
1186 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1187 case IORESOURCE_SIZEALIGN:
1188 return resource_size(res);
1189 case IORESOURCE_STARTALIGN:
1190 return res->start;
1191 default:
1192 return 0;
1193 }
1194}
1195
1196/*
1197 * This is compatibility stuff for IO resources.
1198 *
1199 * Note how this, unlike the above, knows about
1200 * the IO flag meanings (busy etc).
1201 *
1202 * request_region creates a new busy region.
1203 *
1204 * release_region releases a matching busy region.
1205 */
1206
1207static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1208
1209static struct inode *iomem_inode;
1210
1211#ifdef CONFIG_IO_STRICT_DEVMEM
1212static void revoke_iomem(struct resource *res)
1213{
1214 /* pairs with smp_store_release() in iomem_init_inode() */
1215 struct inode *inode = smp_load_acquire(&iomem_inode);
1216
1217 /*
1218 * Check that the initialization has completed. Losing the race
1219 * is ok because it means drivers are claiming resources before
1220 * the fs_initcall level of init and prevent iomem_get_mapping users
1221 * from establishing mappings.
1222 */
1223 if (!inode)
1224 return;
1225
1226 /*
1227 * The expectation is that the driver has successfully marked
1228 * the resource busy by this point, so devmem_is_allowed()
1229 * should start returning false, however for performance this
1230 * does not iterate the entire resource range.
1231 */
1232 if (devmem_is_allowed(PHYS_PFN(res->start)) &&
1233 devmem_is_allowed(PHYS_PFN(res->end))) {
1234 /*
1235 * *cringe* iomem=relaxed says "go ahead, what's the
1236 * worst that can happen?"
1237 */
1238 return;
1239 }
1240
1241 unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
1242}
1243#else
1244static void revoke_iomem(struct resource *res) {}
1245#endif
1246
1247struct address_space *iomem_get_mapping(void)
1248{
1249 /*
1250 * This function is only called from file open paths, hence guaranteed
1251 * that fs_initcalls have completed and no need to check for NULL. But
1252 * since revoke_iomem can be called before the initcall we still need
1253 * the barrier to appease checkers.
1254 */
1255 return smp_load_acquire(&iomem_inode)->i_mapping;
1256}
1257
1258static int __request_region_locked(struct resource *res, struct resource *parent,
1259 resource_size_t start, resource_size_t n,
1260 const char *name, int flags)
1261{
1262 DECLARE_WAITQUEUE(wait, current);
1263
1264 res->name = name;
1265 res->start = start;
1266 res->end = start + n - 1;
1267
1268 for (;;) {
1269 struct resource *conflict;
1270
1271 res->flags = resource_type(parent) | resource_ext_type(parent);
1272 res->flags |= IORESOURCE_BUSY | flags;
1273 res->desc = parent->desc;
1274
1275 conflict = __request_resource(parent, res);
1276 if (!conflict)
1277 break;
1278 /*
1279 * mm/hmm.c reserves physical addresses which then
1280 * become unavailable to other users. Conflicts are
1281 * not expected. Warn to aid debugging if encountered.
1282 */
1283 if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1284 pr_warn("Unaddressable device %s %pR conflicts with %pR",
1285 conflict->name, conflict, res);
1286 }
1287 if (conflict != parent) {
1288 if (!(conflict->flags & IORESOURCE_BUSY)) {
1289 parent = conflict;
1290 continue;
1291 }
1292 }
1293 if (conflict->flags & flags & IORESOURCE_MUXED) {
1294 add_wait_queue(&muxed_resource_wait, &wait);
1295 write_unlock(&resource_lock);
1296 set_current_state(TASK_UNINTERRUPTIBLE);
1297 schedule();
1298 remove_wait_queue(&muxed_resource_wait, &wait);
1299 write_lock(&resource_lock);
1300 continue;
1301 }
1302 /* Uhhuh, that didn't work out.. */
1303 return -EBUSY;
1304 }
1305
1306 return 0;
1307}
1308
1309/**
1310 * __request_region - create a new busy resource region
1311 * @parent: parent resource descriptor
1312 * @start: resource start address
1313 * @n: resource region size
1314 * @name: reserving caller's ID string
1315 * @flags: IO resource flags
1316 */
1317struct resource *__request_region(struct resource *parent,
1318 resource_size_t start, resource_size_t n,
1319 const char *name, int flags)
1320{
1321 struct resource *res = alloc_resource(GFP_KERNEL);
1322 int ret;
1323
1324 if (!res)
1325 return NULL;
1326
1327 write_lock(&resource_lock);
1328 ret = __request_region_locked(res, parent, start, n, name, flags);
1329 write_unlock(&resource_lock);
1330
1331 if (ret) {
1332 free_resource(res);
1333 return NULL;
1334 }
1335
1336 if (parent == &iomem_resource)
1337 revoke_iomem(res);
1338
1339 return res;
1340}
1341EXPORT_SYMBOL(__request_region);
1342
1343/**
1344 * __release_region - release a previously reserved resource region
1345 * @parent: parent resource descriptor
1346 * @start: resource start address
1347 * @n: resource region size
1348 *
1349 * The described resource region must match a currently busy region.
1350 */
1351void __release_region(struct resource *parent, resource_size_t start,
1352 resource_size_t n)
1353{
1354 struct resource **p;
1355 resource_size_t end;
1356
1357 p = &parent->child;
1358 end = start + n - 1;
1359
1360 write_lock(&resource_lock);
1361
1362 for (;;) {
1363 struct resource *res = *p;
1364
1365 if (!res)
1366 break;
1367 if (res->start <= start && res->end >= end) {
1368 if (!(res->flags & IORESOURCE_BUSY)) {
1369 p = &res->child;
1370 continue;
1371 }
1372 if (res->start != start || res->end != end)
1373 break;
1374 *p = res->sibling;
1375 write_unlock(&resource_lock);
1376 if (res->flags & IORESOURCE_MUXED)
1377 wake_up(&muxed_resource_wait);
1378 free_resource(res);
1379 return;
1380 }
1381 p = &res->sibling;
1382 }
1383
1384 write_unlock(&resource_lock);
1385
1386 pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
1387}
1388EXPORT_SYMBOL(__release_region);
1389
1390#ifdef CONFIG_MEMORY_HOTREMOVE
1391/**
1392 * release_mem_region_adjustable - release a previously reserved memory region
1393 * @start: resource start address
1394 * @size: resource region size
1395 *
1396 * This interface is intended for memory hot-delete. The requested region
1397 * is released from a currently busy memory resource. The requested region
1398 * must either match exactly or fit into a single busy resource entry. In
1399 * the latter case, the remaining resource is adjusted accordingly.
1400 * Existing children of the busy memory resource must be immutable in the
1401 * request.
1402 *
1403 * Note:
1404 * - Additional release conditions, such as overlapping region, can be
1405 * supported after they are confirmed as valid cases.
1406 * - When a busy memory resource gets split into two entries, the code
1407 * assumes that all children remain in the lower address entry for
1408 * simplicity. Enhance this logic when necessary.
1409 */
1410void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
1411{
1412 struct resource *parent = &iomem_resource;
1413 struct resource *new_res = NULL;
1414 bool alloc_nofail = false;
1415 struct resource **p;
1416 struct resource *res;
1417 resource_size_t end;
1418
1419 end = start + size - 1;
1420 if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
1421 return;
1422
1423 /*
1424 * We free up quite a lot of memory on memory hotunplug (esp., memap),
1425 * just before releasing the region. This is highly unlikely to
1426 * fail - let's play save and make it never fail as the caller cannot
1427 * perform any error handling (e.g., trying to re-add memory will fail
1428 * similarly).
1429 */
1430retry:
1431 new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
1432
1433 p = &parent->child;
1434 write_lock(&resource_lock);
1435
1436 while ((res = *p)) {
1437 if (res->start >= end)
1438 break;
1439
1440 /* look for the next resource if it does not fit into */
1441 if (res->start > start || res->end < end) {
1442 p = &res->sibling;
1443 continue;
1444 }
1445
1446 if (!(res->flags & IORESOURCE_MEM))
1447 break;
1448
1449 if (!(res->flags & IORESOURCE_BUSY)) {
1450 p = &res->child;
1451 continue;
1452 }
1453
1454 /* found the target resource; let's adjust accordingly */
1455 if (res->start == start && res->end == end) {
1456 /* free the whole entry */
1457 *p = res->sibling;
1458 free_resource(res);
1459 } else if (res->start == start && res->end != end) {
1460 /* adjust the start */
1461 WARN_ON_ONCE(__adjust_resource(res, end + 1,
1462 res->end - end));
1463 } else if (res->start != start && res->end == end) {
1464 /* adjust the end */
1465 WARN_ON_ONCE(__adjust_resource(res, res->start,
1466 start - res->start));
1467 } else {
1468 /* split into two entries - we need a new resource */
1469 if (!new_res) {
1470 new_res = alloc_resource(GFP_ATOMIC);
1471 if (!new_res) {
1472 alloc_nofail = true;
1473 write_unlock(&resource_lock);
1474 goto retry;
1475 }
1476 }
1477 new_res->name = res->name;
1478 new_res->start = end + 1;
1479 new_res->end = res->end;
1480 new_res->flags = res->flags;
1481 new_res->desc = res->desc;
1482 new_res->parent = res->parent;
1483 new_res->sibling = res->sibling;
1484 new_res->child = NULL;
1485
1486 if (WARN_ON_ONCE(__adjust_resource(res, res->start,
1487 start - res->start)))
1488 break;
1489 res->sibling = new_res;
1490 new_res = NULL;
1491 }
1492
1493 break;
1494 }
1495
1496 write_unlock(&resource_lock);
1497 free_resource(new_res);
1498}
1499#endif /* CONFIG_MEMORY_HOTREMOVE */
1500
1501#ifdef CONFIG_MEMORY_HOTPLUG
1502static bool system_ram_resources_mergeable(struct resource *r1,
1503 struct resource *r2)
1504{
1505 /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
1506 return r1->flags == r2->flags && r1->end + 1 == r2->start &&
1507 r1->name == r2->name && r1->desc == r2->desc &&
1508 !r1->child && !r2->child;
1509}
1510
1511/**
1512 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
1513 * merge it with adjacent, mergeable resources
1514 * @res: resource descriptor
1515 *
1516 * This interface is intended for memory hotplug, whereby lots of contiguous
1517 * system ram resources are added (e.g., via add_memory*()) by a driver, and
1518 * the actual resource boundaries are not of interest (e.g., it might be
1519 * relevant for DIMMs). Only resources that are marked mergeable, that have the
1520 * same parent, and that don't have any children are considered. All mergeable
1521 * resources must be immutable during the request.
1522 *
1523 * Note:
1524 * - The caller has to make sure that no pointers to resources that are
1525 * marked mergeable are used anymore after this call - the resource might
1526 * be freed and the pointer might be stale!
1527 * - release_mem_region_adjustable() will split on demand on memory hotunplug
1528 */
1529void merge_system_ram_resource(struct resource *res)
1530{
1531 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1532 struct resource *cur;
1533
1534 if (WARN_ON_ONCE((res->flags & flags) != flags))
1535 return;
1536
1537 write_lock(&resource_lock);
1538 res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
1539
1540 /* Try to merge with next item in the list. */
1541 cur = res->sibling;
1542 if (cur && system_ram_resources_mergeable(res, cur)) {
1543 res->end = cur->end;
1544 res->sibling = cur->sibling;
1545 free_resource(cur);
1546 }
1547
1548 /* Try to merge with previous item in the list. */
1549 cur = res->parent->child;
1550 while (cur && cur->sibling != res)
1551 cur = cur->sibling;
1552 if (cur && system_ram_resources_mergeable(cur, res)) {
1553 cur->end = res->end;
1554 cur->sibling = res->sibling;
1555 free_resource(res);
1556 }
1557 write_unlock(&resource_lock);
1558}
1559#endif /* CONFIG_MEMORY_HOTPLUG */
1560
1561/*
1562 * Managed region resource
1563 */
1564static void devm_resource_release(struct device *dev, void *ptr)
1565{
1566 struct resource **r = ptr;
1567
1568 release_resource(*r);
1569}
1570
1571/**
1572 * devm_request_resource() - request and reserve an I/O or memory resource
1573 * @dev: device for which to request the resource
1574 * @root: root of the resource tree from which to request the resource
1575 * @new: descriptor of the resource to request
1576 *
1577 * This is a device-managed version of request_resource(). There is usually
1578 * no need to release resources requested by this function explicitly since
1579 * that will be taken care of when the device is unbound from its driver.
1580 * If for some reason the resource needs to be released explicitly, because
1581 * of ordering issues for example, drivers must call devm_release_resource()
1582 * rather than the regular release_resource().
1583 *
1584 * When a conflict is detected between any existing resources and the newly
1585 * requested resource, an error message will be printed.
1586 *
1587 * Returns 0 on success or a negative error code on failure.
1588 */
1589int devm_request_resource(struct device *dev, struct resource *root,
1590 struct resource *new)
1591{
1592 struct resource *conflict, **ptr;
1593
1594 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1595 if (!ptr)
1596 return -ENOMEM;
1597
1598 *ptr = new;
1599
1600 conflict = request_resource_conflict(root, new);
1601 if (conflict) {
1602 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1603 new, conflict->name, conflict);
1604 devres_free(ptr);
1605 return -EBUSY;
1606 }
1607
1608 devres_add(dev, ptr);
1609 return 0;
1610}
1611EXPORT_SYMBOL(devm_request_resource);
1612
1613static int devm_resource_match(struct device *dev, void *res, void *data)
1614{
1615 struct resource **ptr = res;
1616
1617 return *ptr == data;
1618}
1619
1620/**
1621 * devm_release_resource() - release a previously requested resource
1622 * @dev: device for which to release the resource
1623 * @new: descriptor of the resource to release
1624 *
1625 * Releases a resource previously requested using devm_request_resource().
1626 */
1627void devm_release_resource(struct device *dev, struct resource *new)
1628{
1629 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1630 new));
1631}
1632EXPORT_SYMBOL(devm_release_resource);
1633
1634struct region_devres {
1635 struct resource *parent;
1636 resource_size_t start;
1637 resource_size_t n;
1638};
1639
1640static void devm_region_release(struct device *dev, void *res)
1641{
1642 struct region_devres *this = res;
1643
1644 __release_region(this->parent, this->start, this->n);
1645}
1646
1647static int devm_region_match(struct device *dev, void *res, void *match_data)
1648{
1649 struct region_devres *this = res, *match = match_data;
1650
1651 return this->parent == match->parent &&
1652 this->start == match->start && this->n == match->n;
1653}
1654
1655struct resource *
1656__devm_request_region(struct device *dev, struct resource *parent,
1657 resource_size_t start, resource_size_t n, const char *name)
1658{
1659 struct region_devres *dr = NULL;
1660 struct resource *res;
1661
1662 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1663 GFP_KERNEL);
1664 if (!dr)
1665 return NULL;
1666
1667 dr->parent = parent;
1668 dr->start = start;
1669 dr->n = n;
1670
1671 res = __request_region(parent, start, n, name, 0);
1672 if (res)
1673 devres_add(dev, dr);
1674 else
1675 devres_free(dr);
1676
1677 return res;
1678}
1679EXPORT_SYMBOL(__devm_request_region);
1680
1681void __devm_release_region(struct device *dev, struct resource *parent,
1682 resource_size_t start, resource_size_t n)
1683{
1684 struct region_devres match_data = { parent, start, n };
1685
1686 __release_region(parent, start, n);
1687 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1688 &match_data));
1689}
1690EXPORT_SYMBOL(__devm_release_region);
1691
1692/*
1693 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1694 */
1695#define MAXRESERVE 4
1696static int __init reserve_setup(char *str)
1697{
1698 static int reserved;
1699 static struct resource reserve[MAXRESERVE];
1700
1701 for (;;) {
1702 unsigned int io_start, io_num;
1703 int x = reserved;
1704 struct resource *parent;
1705
1706 if (get_option(&str, &io_start) != 2)
1707 break;
1708 if (get_option(&str, &io_num) == 0)
1709 break;
1710 if (x < MAXRESERVE) {
1711 struct resource *res = reserve + x;
1712
1713 /*
1714 * If the region starts below 0x10000, we assume it's
1715 * I/O port space; otherwise assume it's memory.
1716 */
1717 if (io_start < 0x10000) {
1718 res->flags = IORESOURCE_IO;
1719 parent = &ioport_resource;
1720 } else {
1721 res->flags = IORESOURCE_MEM;
1722 parent = &iomem_resource;
1723 }
1724 res->name = "reserved";
1725 res->start = io_start;
1726 res->end = io_start + io_num - 1;
1727 res->flags |= IORESOURCE_BUSY;
1728 res->desc = IORES_DESC_NONE;
1729 res->child = NULL;
1730 if (request_resource(parent, res) == 0)
1731 reserved = x+1;
1732 }
1733 }
1734 return 1;
1735}
1736__setup("reserve=", reserve_setup);
1737
1738/*
1739 * Check if the requested addr and size spans more than any slot in the
1740 * iomem resource tree.
1741 */
1742int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1743{
1744 resource_size_t end = addr + size - 1;
1745 struct resource *p;
1746 int err = 0;
1747
1748 read_lock(&resource_lock);
1749 for_each_resource(&iomem_resource, p, false) {
1750 /*
1751 * We can probably skip the resources without
1752 * IORESOURCE_IO attribute?
1753 */
1754 if (p->start > end)
1755 continue;
1756 if (p->end < addr)
1757 continue;
1758 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1759 PFN_DOWN(p->end) >= PFN_DOWN(end))
1760 continue;
1761 /*
1762 * if a resource is "BUSY", it's not a hardware resource
1763 * but a driver mapping of such a resource; we don't want
1764 * to warn for those; some drivers legitimately map only
1765 * partial hardware resources. (example: vesafb)
1766 */
1767 if (p->flags & IORESOURCE_BUSY)
1768 continue;
1769
1770 pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1771 &addr, &end, p->name, p);
1772 err = -1;
1773 break;
1774 }
1775 read_unlock(&resource_lock);
1776
1777 return err;
1778}
1779
1780#ifdef CONFIG_STRICT_DEVMEM
1781static int strict_iomem_checks = 1;
1782#else
1783static int strict_iomem_checks;
1784#endif
1785
1786/*
1787 * Check if an address is exclusive to the kernel and must not be mapped to
1788 * user space, for example, via /dev/mem.
1789 *
1790 * Returns true if exclusive to the kernel, otherwise returns false.
1791 */
1792bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1793{
1794 const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1795 IORESOURCE_EXCLUSIVE;
1796 bool skip_children = false, err = false;
1797 struct resource *p;
1798
1799 read_lock(&resource_lock);
1800 for_each_resource(root, p, skip_children) {
1801 if (p->start >= addr + size)
1802 break;
1803 if (p->end < addr) {
1804 skip_children = true;
1805 continue;
1806 }
1807 skip_children = false;
1808
1809 /*
1810 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1811 * IORESOURCE_EXCLUSIVE is set, even if they
1812 * are not busy and even if "iomem=relaxed" is set. The
1813 * responsible driver dynamically adds/removes system RAM within
1814 * such an area and uncontrolled access is dangerous.
1815 */
1816 if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1817 err = true;
1818 break;
1819 }
1820
1821 /*
1822 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
1823 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
1824 * resource is busy.
1825 */
1826 if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1827 continue;
1828 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1829 || p->flags & IORESOURCE_EXCLUSIVE) {
1830 err = true;
1831 break;
1832 }
1833 }
1834 read_unlock(&resource_lock);
1835
1836 return err;
1837}
1838
1839bool iomem_is_exclusive(u64 addr)
1840{
1841 return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1842 PAGE_SIZE);
1843}
1844
1845struct resource_entry *resource_list_create_entry(struct resource *res,
1846 size_t extra_size)
1847{
1848 struct resource_entry *entry;
1849
1850 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1851 if (entry) {
1852 INIT_LIST_HEAD(&entry->node);
1853 entry->res = res ? res : &entry->__res;
1854 }
1855
1856 return entry;
1857}
1858EXPORT_SYMBOL(resource_list_create_entry);
1859
1860void resource_list_free(struct list_head *head)
1861{
1862 struct resource_entry *entry, *tmp;
1863
1864 list_for_each_entry_safe(entry, tmp, head, node)
1865 resource_list_destroy_entry(entry);
1866}
1867EXPORT_SYMBOL(resource_list_free);
1868
1869#ifdef CONFIG_GET_FREE_REGION
1870#define GFR_DESCENDING (1UL << 0)
1871#define GFR_REQUEST_REGION (1UL << 1)
1872#ifdef PA_SECTION_SHIFT
1873#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
1874#else
1875#define GFR_DEFAULT_ALIGN PAGE_SIZE
1876#endif
1877
1878static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1879 resource_size_t align, unsigned long flags)
1880{
1881 if (flags & GFR_DESCENDING) {
1882 resource_size_t end;
1883
1884 end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1885 return end - size + 1;
1886 }
1887
1888 return ALIGN(max(base->start, align), align);
1889}
1890
1891static bool gfr_continue(struct resource *base, resource_size_t addr,
1892 resource_size_t size, unsigned long flags)
1893{
1894 if (flags & GFR_DESCENDING)
1895 return addr > size && addr >= base->start;
1896 /*
1897 * In the ascend case be careful that the last increment by
1898 * @size did not wrap 0.
1899 */
1900 return addr > addr - size &&
1901 addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
1902}
1903
1904static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1905 unsigned long flags)
1906{
1907 if (flags & GFR_DESCENDING)
1908 return addr - size;
1909 return addr + size;
1910}
1911
1912static void remove_free_mem_region(void *_res)
1913{
1914 struct resource *res = _res;
1915
1916 if (res->parent)
1917 remove_resource(res);
1918 free_resource(res);
1919}
1920
static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
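
/*
 * Illustrative usage sketch only ("pgmap" and its surrounding setup are
 * hypothetical; the pagemap would typically be handed to
 * devm_memremap_pages() afterwards):
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 */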

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, which can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span of
 * @base that is not already claimed by a descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
		unsigned long size, unsigned long align, const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
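
/*
 * Illustrative usage sketch only ("cxl_root_res", SZ_256M and the error
 * handling are hypothetical):
 *
 *	res = alloc_free_mem_region(cxl_root_res, SZ_256M, SZ_256M,
 *				    "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */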
#endif /* CONFIG_GET_FREE_REGION */

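/*
 * Parse the "iomem=" kernel parameter: "relaxed" clears and "strict" sets
 * strict_iomem_checks, which gates the exclusivity checks above.
 */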
static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

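/*
 * The "iomem" pseudo filesystem hosts the anonymous inode whose address
 * space revoke_iomem() uses to unmap userspace mappings of regions that a
 * driver has claimed.
 */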
static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the iomem revocation inode only once it is fully
	 * initialized.  Pairs with the smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);