// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>

struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

/*
 * Return the next node of @p in pre-order tree traversal.  If
 * @skip_children is true, skip the descendant nodes of @p in
 * traversal.  If @p is a descendant of @subtree_root, only traverse
 * the subtree under @subtree_root.
 */
static struct resource *next_resource(struct resource *p, bool skip_children,
				      struct resource *subtree_root)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent) {
		p = p->parent;
		if (p == subtree_root)
			return NULL;
	}
	return p->sibling;
}

/*
 * Traverse the resource subtree under @_root in pre-order, excluding
 * @_root itself.
 *
 * NOTE: '__p' is introduced to avoid shadowing '_p' outside of loop.
 * And it is referenced to avoid unused variable warning.
 */
#define for_each_resource(_root, _p, _skip_children)			\
	for (typeof(_root) __root = (_root), __p = _p = __root->child;	\
	     __p && _p; _p = next_resource(_p, _skip_children, __root))

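/*
 * Usage sketch (illustrative, not part of this file): dumping every
 * descendant of iomem_resource in pre-order. The traversal and lock are
 * real; the pr_info() consumer is hypothetical.
 *
 *	struct resource *p;
 *
 *	read_lock(&resource_lock);
 *	for_each_resource(&iomem_resource, p, false)
 *		pr_info("%pR\n", p);
 *	read_unlock(&resource_lock);
 */
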
#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false, NULL);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}
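
/*
 * For reference, the format above yields /proc/iomem lines like the
 * following (addresses and names are illustrative), indented two spaces
 * per tree level up to MAX_IORES_LEVEL, with the range printed as zeroes
 * for readers lacking CAP_SYS_ADMIN:
 *
 *	fe000000-fe7fffff : PCI Bus 0000:01
 *	  fe000000-fe003fff : 0000:01:00.0
 */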

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}
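
/*
 * Worked example for the loop above (addresses are made up): with
 * children [0x100-0x1ff] and [0x400-0x4ff] under @root, requesting
 * [0x200-0x2ff] stops at the 0x400 child (tmp->start > end), links the
 * new entry in front of it and returns NULL; requesting [0x180-0x2ff]
 * instead returns the overlapping [0x100-0x1ff] entry as the conflict.
 */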

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}
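
/*
 * Note on the !release_child case above (tree shapes illustrative):
 * releasing B from A -> B -> {C, D}, where '->' goes parent to child,
 * splices C and D into B's old spot so they become children of A,
 * preserving their mutual sibling order.
 */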

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
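
/*
 * Usage sketch (device name and addresses are hypothetical): a driver
 * claiming a fixed MMIO window directly under iomem_resource.
 *
 *	static struct resource foo_res = {
 *		.name	= "foo-ctrl",
 *		.start	= 0xfe200000,
 *		.end	= 0xfe200fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;
 */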

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
{
	return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
}

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV.  Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		/* Found a match, break */
		if (is_type_match(p, flags, desc))
			break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}
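
/*
 * Clipping example (addresses illustrative): searching [0x2000..0x5fff]
 * when a matching resource spans [0x1000..0x3fff] returns 0 with *res
 * set to [0x2000..0x3fff] -- only the part of the resource that lies
 * inside the requested window is reported back.
 */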

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
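
/*
 * Usage sketch (the callback and the 4 GiB bound are hypothetical, and
 * SZ_4G is assumed from <linux/sizes.h>): counting busy System RAM
 * ranges below 4 GiB with the walker above.
 *
 *	static int count_res(struct resource *res, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	walk_iomem_res_desc(IORES_DESC_NONE,
 *			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
 *			    0, SZ_4G - 1, &n, count_res);
 */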

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is only for System RAM; it deals with full ranges, not PFNs. If
 * resources are not PFN-aligned, dealing with PFNs can truncate ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
				int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
		(!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i++] = res;
		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
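
/*
 * Rounding example for PFN_UP()/PFN_DOWN() above (4 KiB pages assumed):
 * a reported range [0x1800..0x47ff] gives pfn = PFN_UP(0x1800) = 2 and
 * end_pfn = PFN_DOWN(0x4800) = 4, so @func sees pages 2 and 3 only;
 * partially covered pages at either edge are rounded inward and skipped.
 */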

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	int type = 0, other = 0;
	struct resource *p, *dp;
	struct resource res, o;
	bool covered;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p; p = p->sibling) {
		if (!resource_intersection(p, &res, &o))
			continue;
		if (is_type_match(p, flags, desc)) {
			type++;
			continue;
		}
		/*
		 * Continue the search in descendant resources, since
		 * matched descendants may cover only some ranges of 'p'.
		 * For example,
		 *
		 *	|------------- "CXL Window 0" ------------|
		 *	|-- "System RAM" --|
		 *
		 * behaves like the following fake resource tree when
		 * searching for "System RAM":
		 *
		 *	|-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			if (is_type_match(dp, flags, desc)) {
				type++;
				/*
				 * Range from 'o.start' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > o.start)
					break;
				if (dp->end >= o.end) {
					covered = true;
					break;
				}
				/* Remove covered range */
				o.start = max(o.start, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
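
/*
 * Usage sketch (the window is hypothetical): remapping code typically
 * rejects a window that straddles RAM and something else.
 *
 *	int rc = region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *				   IORES_DESC_NONE);
 *	if (rc == REGION_MIXED)
 *		return NULL;
 */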

void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root:	Root resource descriptor
 * @new:	Resource descriptor awaiting an empty resource space
 * @size:	The minimum size of the empty space
 * @constraint:	The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying range and
 * alignment @constraints.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);
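
/*
 * Usage sketch (values are made up; callers such as allocate_resource()
 * below hold resource_lock around the call): ask for a 1 MiB-aligned,
 * 16 MiB hole anywhere inside a fixed window.
 *
 *	struct resource_constraint c = {
 *		.min	= 0x10000000,
 *		.max	= 0x1fffffff,
 *		.align	= SZ_1M,
 *	};
 *	struct resource win = { .flags = IORESOURCE_MEM };
 *
 *	if (!find_resource_space(&iomem_resource, &win, SZ_16M, &c))
 *		... win.start and win.end now describe the free gap ...
 */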

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the memory range and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * The resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}
EXPORT_SYMBOL(allocate_resource);
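
/*
 * Illustrative sketch (not part of the original file; the device name and
 * bounds are made up): carve a 1 MiB, 1 MiB-aligned window for a
 * hypothetical "my-card" device out of the iomem tree.
 *
 *	static struct resource my_res = {
 *		.name  = "my-card",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &my_res, SZ_1M,
 *				0x90000000, 0x9fffffff, SZ_1M, NULL, NULL);
 *	if (err)
 *		return err;	// no suitable gap found
 */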

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise.
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}
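
/*
 * Illustrative sketch (the address is made up): only direct children of
 * @root are searched, so pass the parent that actually holds the entry.
 * Nothing pins the returned resource, so the caller must know it cannot
 * go away.
 *
 *	struct resource *res = lookup_resource(&iomem_resource, 0x90000000);
 *
 *	if (res)
 *		pr_info("found %s: %pR\n", res->name, res);
 */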

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource *__insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or a pointer to the conflicting resource if the
 * resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
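
/*
 * Illustrative sketch (name and range are made up): unlike
 * request_resource(), insert_resource() tolerates existing entries that fit
 * entirely inside the new range; they simply become its children.
 *
 *	static struct resource fw_window = {
 *		.name  = "vendor-fw-window",
 *		.start = 0xfed00000,
 *		.end   = 0xfedfffff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_window))
 *		pr_warn("vendor-fw-window overlaps partially, not inserted\n");
 */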

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n",
			new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, "CXL");

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
			     resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
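
/*
 * Illustrative sketch (values are made up): grow a previously requested
 * region by one page. The new span must still fit inside the parent and
 * must not run into a sibling, or -EBUSY is returned.
 *
 *	err = adjust_resource(res, res->start,
 *			      resource_size(res) + PAGE_SIZE);
 *	if (err)
 *		dev_warn(dev, "cannot grow %pR\n", res);
 */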

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
		    conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}
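
/*
 * Illustrative sketch (the resource below is made up): a size-aligned
 * resource (typical for PCI BARs) reports its size as its alignment; a
 * start-aligned one reports its current start address.
 *
 *	struct resource r = DEFINE_RES_MEM(0x1000, SZ_4K);
 *
 *	r.flags |= IORESOURCE_SIZEALIGN;
 *	// resource_alignment(&r) now evaluates to SZ_4K
 */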

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which prevents iomem_get_mapping()
	 * users from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false; however, for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence it is
	 * guaranteed that fs_initcalls have completed and there is no need
	 * to check for NULL. But since revoke_iomem() can be called before
	 * the initcall, we still need the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users. Conflicts are
		 * not expected. Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);
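
/*
 * Illustrative sketch (base and size are made up): drivers normally reach
 * this through the request_region()/request_mem_region() wrappers from
 * <linux/ioport.h> rather than calling __request_region() directly.
 *
 *	if (!request_mem_region(0xfeb00000, SZ_4K, "my-card"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(0xfeb00000, SZ_4K);
 */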

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
	 * just before releasing the region. This is highly unlikely to
	 * fail - let's play safe and make it never fail as the caller cannot
	 * perform any error handling (e.g., trying to re-add memory will fail
	 * similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit in */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., they might only
 * be relevant for DIMMs). Only resources that are marked mergeable, that
 * have the same parent, and that don't have any children are considered.
 * All mergeable resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
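
/*
 * Illustrative sketch (assumes a hypothetical probe function taking @res
 * from the device's platform description): the resource is released
 * automatically when the device is unbound.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *		return devm_request_resource(&pdev->dev, &iomem_resource, res);
 *	}
 */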

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
	       this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);
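
/*
 * Illustrative sketch (bounds are made up): drivers normally use the
 * devm_request_region()/devm_request_mem_region() wrappers from
 * <linux/ioport.h> instead of calling __devm_request_region() directly.
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "my-card"))
 *		return -EBUSY;
 */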

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on the "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

/*
 * Check whether the requested addr and size span more than any single slot
 * in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * If a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM
		 * within such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) ||
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
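
/*
 * Illustrative sketch (the range is made up): building up and tearing down
 * a resource list, as bus code does with the helpers from
 * <linux/resource_ext.h>. Passing a NULL @res makes the entry use its
 * embedded __res storage.
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0x90000000;
 *	entry->res->end = 0x90000000 + SZ_64M - 1;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &resources);
 *	...
 *	resource_list_free(&resources);
 */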

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#ifdef PA_SECTION_SHIFT
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
#else
#define GFR_DEFAULT_ALIGN	PAGE_SIZE
#endif

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
		return end - size + 1;
	}

	return ALIGN(max(base->start, align), align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascend case be careful that the last increment by
	 * @size did not wrap 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end, DIRECT_MAP_PHYSMEM_END);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
					      struct resource *base,
					      unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
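
/*
 * Illustrative sketch (assumes an HMM-style driver in its probe path, with
 * a hypothetical @pagemap): the returned range is device-managed and is
 * typically handed on to memremap_pages() via a struct dev_pagemap.
 *
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_1G);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pagemap->range.start = res->start;
 *	pagemap->range.end = res->end;
 */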

struct resource *request_free_mem_region(struct resource *base,
					 unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource covering a range in the span of @base
 * that is free, i.e. unclaimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
#endif	/* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish that the iomem revocation inode is initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);