   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/kernel/resource.c
   4 *
   5 * Copyright (C) 1999	Linus Torvalds
   6 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
   7 *
   8 * Arbitrary resource management.
   9 */
  10
  11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12
  13#include <linux/export.h>
  14#include <linux/errno.h>
  15#include <linux/ioport.h>
  16#include <linux/init.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19#include <linux/fs.h>
  20#include <linux/proc_fs.h>
  21#include <linux/pseudo_fs.h>
  22#include <linux/sched.h>
  23#include <linux/seq_file.h>
  24#include <linux/device.h>
  25#include <linux/pfn.h>
  26#include <linux/mm.h>
  27#include <linux/mount.h>
  28#include <linux/resource_ext.h>
  29#include <uapi/linux/magic.h>
  30#include <linux/string.h>
  31#include <linux/vmalloc.h>
  32#include <asm/io.h>
  33
  34
  35struct resource ioport_resource = {
  36	.name	= "PCI IO",
  37	.start	= 0,
  38	.end	= IO_SPACE_LIMIT,
  39	.flags	= IORESOURCE_IO,
  40};
  41EXPORT_SYMBOL(ioport_resource);
  42
  43struct resource iomem_resource = {
  44	.name	= "PCI mem",
  45	.start	= 0,
  46	.end	= -1,
  47	.flags	= IORESOURCE_MEM,
  48};
  49EXPORT_SYMBOL(iomem_resource);
  50
  51/* constraints to be met while allocating resources */
  52struct resource_constraint {
  53	resource_size_t min, max, align;
  54	resource_size_t (*alignf)(void *, const struct resource *,
  55			resource_size_t, resource_size_t);
  56	void *alignf_data;
  57};
  58
  59static DEFINE_RWLOCK(resource_lock);
  60
  61static struct resource *next_resource(struct resource *p, bool skip_children)
  62{
  63	if (!skip_children && p->child)
  64		return p->child;
  65	while (!p->sibling && p->parent)
  66		p = p->parent;
  67	return p->sibling;
  68}
  69
  70#define for_each_resource(_root, _p, _skip_children) \
  71	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
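
/*
 * Illustrative sketch (not part of the kernel source): for_each_resource()
 * performs a pre-order, depth-first walk over the tree below @_root, and
 * must run under resource_lock. With _skip_children == true it does not
 * descend, so only @_root's direct children are visited. A hypothetical
 * debug dump could look like this:
 */
static void __maybe_unused dump_resource_tree(struct resource *root)
{
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, false)
		pr_info("%pR (%s)\n", p, p->name ? p->name : "<unnamed>");
	read_unlock(&resource_lock);
}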
  72
  73#ifdef CONFIG_PROC_FS
  74
  75enum { MAX_IORES_LEVEL = 5 };
  76
  77static void *r_start(struct seq_file *m, loff_t *pos)
  78	__acquires(resource_lock)
  79{
  80	struct resource *root = pde_data(file_inode(m->file));
  81	struct resource *p;
  82	loff_t l = *pos;
  83
  84	read_lock(&resource_lock);
  85	for_each_resource(root, p, false) {
  86		if (l-- == 0)
  87			break;
  88	}
  89
  90	return p;
  91}
  92
  93static void *r_next(struct seq_file *m, void *v, loff_t *pos)
  94{
  95	struct resource *p = v;
  96
  97	(*pos)++;
  98
  99	return (void *)next_resource(p, false);
 100}
 101
 102static void r_stop(struct seq_file *m, void *v)
 103	__releases(resource_lock)
 104{
 105	read_unlock(&resource_lock);
 106}
 107
 108static int r_show(struct seq_file *m, void *v)
 109{
 110	struct resource *root = pde_data(file_inode(m->file));
 111	struct resource *r = v, *p;
 112	unsigned long long start, end;
 113	int width = root->end < 0x10000 ? 4 : 8;
 114	int depth;
 115
 116	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
 117		if (p->parent == root)
 118			break;
 119
 120	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
 121		start = r->start;
 122		end = r->end;
 123	} else {
 124		start = end = 0;
 125	}
 126
 127	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
 128			depth * 2, "",
 129			width, start,
 130			width, end,
 131			r->name ? r->name : "<BAD>");
 132	return 0;
 133}
 134
 135static const struct seq_operations resource_op = {
 136	.start	= r_start,
 137	.next	= r_next,
 138	.stop	= r_stop,
 139	.show	= r_show,
 140};
 141
 142static int __init ioresources_init(void)
 143{
 144	proc_create_seq_data("ioports", 0, NULL, &resource_op,
 145			&ioport_resource);
 146	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
 147	return 0;
 148}
 149__initcall(ioresources_init);
 150
 151#endif /* CONFIG_PROC_FS */
 152
 153static void free_resource(struct resource *res)
 154{
  155	/*
 156	 * If the resource was allocated using memblock early during boot
 157	 * we'll leak it here: we can only return full pages back to the
 158	 * buddy and trying to be smart and reusing them eventually in
 159	 * alloc_resource() overcomplicates resource handling.
 160	 */
 161	if (res && PageSlab(virt_to_head_page(res)))
 162		kfree(res);
 163}
 164
 165static struct resource *alloc_resource(gfp_t flags)
 166{
 167	return kzalloc(sizeof(struct resource), flags);
 168}
 169
 170/* Return the conflict entry if you can't request it */
 171static struct resource * __request_resource(struct resource *root, struct resource *new)
 172{
 173	resource_size_t start = new->start;
 174	resource_size_t end = new->end;
 175	struct resource *tmp, **p;
 176
 177	if (end < start)
 178		return root;
 179	if (start < root->start)
 180		return root;
 181	if (end > root->end)
 182		return root;
 183	p = &root->child;
 184	for (;;) {
 185		tmp = *p;
 186		if (!tmp || tmp->start > end) {
 187			new->sibling = tmp;
 188			*p = new;
 189			new->parent = root;
 190			return NULL;
 191		}
 192		p = &tmp->sibling;
 193		if (tmp->end < start)
 194			continue;
 195		return tmp;
 196	}
 197}
 198
 199static int __release_resource(struct resource *old, bool release_child)
 200{
 201	struct resource *tmp, **p, *chd;
 202
 203	p = &old->parent->child;
 204	for (;;) {
 205		tmp = *p;
 206		if (!tmp)
 207			break;
 208		if (tmp == old) {
 209			if (release_child || !(tmp->child)) {
 210				*p = tmp->sibling;
 211			} else {
 212				for (chd = tmp->child;; chd = chd->sibling) {
 213					chd->parent = tmp->parent;
 214					if (!(chd->sibling))
 215						break;
 216				}
 217				*p = tmp->child;
 218				chd->sibling = tmp->sibling;
 219			}
 220			old->parent = NULL;
 221			return 0;
 222		}
 223		p = &tmp->sibling;
 224	}
 225	return -EINVAL;
 226}
 227
 228static void __release_child_resources(struct resource *r)
 229{
 230	struct resource *tmp, *p;
 231	resource_size_t size;
 232
 233	p = r->child;
 234	r->child = NULL;
 235	while (p) {
 236		tmp = p;
 237		p = p->sibling;
 238
 239		tmp->parent = NULL;
 240		tmp->sibling = NULL;
 241		__release_child_resources(tmp);
 242
 243		printk(KERN_DEBUG "release child resource %pR\n", tmp);
 244		/* need to restore size, and keep flags */
 245		size = resource_size(tmp);
 246		tmp->start = 0;
 247		tmp->end = size - 1;
 248	}
 249}
 250
 251void release_child_resources(struct resource *r)
 252{
 253	write_lock(&resource_lock);
 254	__release_child_resources(r);
 255	write_unlock(&resource_lock);
 256}
 257
 258/**
 259 * request_resource_conflict - request and reserve an I/O or memory resource
 260 * @root: root resource descriptor
 261 * @new: resource descriptor desired by caller
 262 *
  263 * Returns NULL on success, or a pointer to the conflicting resource on error.
 264 */
 265struct resource *request_resource_conflict(struct resource *root, struct resource *new)
 266{
 267	struct resource *conflict;
 268
 269	write_lock(&resource_lock);
 270	conflict = __request_resource(root, new);
 271	write_unlock(&resource_lock);
 272	return conflict;
 273}
 274
 275/**
 276 * request_resource - request and reserve an I/O or memory resource
 277 * @root: root resource descriptor
 278 * @new: resource descriptor desired by caller
 279 *
 280 * Returns 0 for success, negative error code on error.
 281 */
 282int request_resource(struct resource *root, struct resource *new)
 283{
 284	struct resource *conflict;
 285
 286	conflict = request_resource_conflict(root, new);
 287	return conflict ? -EBUSY : 0;
 288}
 289
 290EXPORT_SYMBOL(request_resource);
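
/*
 * Illustrative sketch (not part of the kernel source): a typical caller
 * fills in a resource describing the exact range it wants and makes it a
 * child of a known root; the range below is hypothetical.
 */
static struct resource example_mmio = {
	.name	= "example-device",
	.start	= 0xfed00000,
	.end	= 0xfed00fff,
	.flags	= IORESOURCE_MEM,
};

static int __maybe_unused example_claim(void)
{
	int ret = request_resource(&iomem_resource, &example_mmio);

	/* on success, undo later with release_resource(&example_mmio) */
	return ret;
}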
 291
 292/**
 293 * release_resource - release a previously reserved resource
 294 * @old: resource pointer
 295 */
 296int release_resource(struct resource *old)
 297{
 298	int retval;
 299
 300	write_lock(&resource_lock);
 301	retval = __release_resource(old, true);
 302	write_unlock(&resource_lock);
 303	return retval;
 304}
 305
 306EXPORT_SYMBOL(release_resource);
 307
 308/**
 309 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 310 *			 [@start..@end].
 311 *
 312 * If a resource is found, returns 0 and @*res is overwritten with the part
 313 * of the resource that's within [@start..@end]; if none is found, returns
 314 * -ENODEV.  Returns -EINVAL for invalid parameters.
 315 *
 316 * @start:	start address of the resource searched for
 317 * @end:	end address of same resource
 318 * @flags:	flags which the resource must have
 319 * @desc:	descriptor the resource must have
 320 * @res:	return ptr, if resource found
 321 *
 322 * The caller must specify @start, @end, @flags, and @desc
 323 * (which may be IORES_DESC_NONE).
 324 */
 325static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 326			       unsigned long flags, unsigned long desc,
 327			       struct resource *res)
 328{
 329	struct resource *p;
 330
 331	if (!res)
 332		return -EINVAL;
 333
 334	if (start >= end)
 335		return -EINVAL;
 336
 337	read_lock(&resource_lock);
 338
 339	for_each_resource(&iomem_resource, p, false) {
 340		/* If we passed the resource we are looking for, stop */
 341		if (p->start > end) {
 342			p = NULL;
 343			break;
 344		}
 345
 346		/* Skip until we find a range that matches what we look for */
 347		if (p->end < start)
 348			continue;
 349
 350		if ((p->flags & flags) != flags)
 351			continue;
 352		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
 353			continue;
 354
 355		/* Found a match, break */
 356		break;
 357	}
 358
 359	if (p) {
 360		/* copy data */
 361		*res = (struct resource) {
 362			.start = max(start, p->start),
 363			.end = min(end, p->end),
 364			.flags = p->flags,
 365			.desc = p->desc,
 366			.parent = p->parent,
 367		};
 368	}
 369
 370	read_unlock(&resource_lock);
 371	return p ? 0 : -ENODEV;
 372}
 373
 374static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
 375				 unsigned long flags, unsigned long desc,
 376				 void *arg,
 377				 int (*func)(struct resource *, void *))
 378{
 379	struct resource res;
 380	int ret = -EINVAL;
 381
 382	while (start < end &&
 383	       !find_next_iomem_res(start, end, flags, desc, &res)) {
 384		ret = (*func)(&res, arg);
 385		if (ret)
 386			break;
 387
 388		start = res.end + 1;
 389	}
 390
 391	return ret;
 392}
 393
 394/**
 395 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 396 *			 with matching resource ranges.
  397 *
 398 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 399 * @flags: I/O resource flags
 400 * @start: start addr
 401 * @end: end addr
 402 * @arg: function argument for the callback @func
 403 * @func: callback function that is called for each qualifying resource area
 404 *
 405 * All the memory ranges which overlap start,end and also match flags and
 406 * desc are valid candidates.
 407 *
 408 * NOTE: For a new descriptor search, define a new IORES_DESC in
 409 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 410 */
 411int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
 412		u64 end, void *arg, int (*func)(struct resource *, void *))
 413{
 414	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
 415}
 416EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
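
/*
 * Illustrative sketch (not part of the kernel source): the callback passed
 * to walk_iomem_res_desc() receives each matching sub-range, already
 * clipped to [start..end]. This hypothetical example counts busy System
 * RAM ranges in the first 4 GiB (SZ_4G comes from <linux/sizes.h>).
 */
static int __maybe_unused count_range(struct resource *res, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
	return 0;	/* returning non-zero would stop the walk */
}

static unsigned int __maybe_unused count_busy_ram_below_4g(void)
{
	unsigned int count = 0;

	walk_iomem_res_desc(IORES_DESC_NONE,
			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
			    0, SZ_4G - 1, &count, count_range);
	return count;
}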
 417
 418/*
 419 * This function calls the @func callback against all memory ranges of type
  420 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
  421 * This function is only for System RAM; it deals with full ranges and not
  422 * PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
  423 * ranges.
 424 */
 425int walk_system_ram_res(u64 start, u64 end, void *arg,
 426			int (*func)(struct resource *, void *))
 427{
 428	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 429
 430	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
 431				     func);
 432}
 433
 434/*
 435 * This function, being a variant of walk_system_ram_res(), calls the @func
 436 * callback against all memory ranges of type System RAM which are marked as
  437 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY, in reverse order, i.e., from
  438 * higher to lower addresses.
 439 */
 440int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
 441				int (*func)(struct resource *, void *))
 442{
 443	struct resource res, *rams;
 444	int rams_size = 16, i;
 445	unsigned long flags;
 446	int ret = -1;
 447
 448	/* create a list */
 449	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
 450	if (!rams)
 451		return ret;
 452
 453	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 454	i = 0;
 455	while ((start < end) &&
 456		(!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
 457		if (i >= rams_size) {
 458			/* re-alloc */
 459			struct resource *rams_new;
 460
 461			rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
 462					     (rams_size + 16) * sizeof(struct resource),
 463					     GFP_KERNEL);
 464			if (!rams_new)
 465				goto out;
 466
 467			rams = rams_new;
 468			rams_size += 16;
 469		}
 470
 471		rams[i].start = res.start;
 472		rams[i++].end = res.end;
 473
 474		start = res.end + 1;
 475	}
 476
 477	/* go reverse */
 478	for (i--; i >= 0; i--) {
 479		ret = (*func)(&rams[i], arg);
 480		if (ret)
 481			break;
 482	}
 483
 484out:
 485	kvfree(rams);
 486	return ret;
 487}
 488
 489/*
 490 * This function calls the @func callback against all memory ranges, which
  491 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 492 */
 493int walk_mem_res(u64 start, u64 end, void *arg,
 494		 int (*func)(struct resource *, void *))
 495{
 496	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 497
 498	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
 499				     func);
 500}
 501
 502/*
 503 * This function calls the @func callback against all memory ranges of type
  504 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 505 * It is to be used only for System RAM.
 506 */
 507int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 508			  void *arg, int (*func)(unsigned long, unsigned long, void *))
 509{
 510	resource_size_t start, end;
 511	unsigned long flags;
 512	struct resource res;
 513	unsigned long pfn, end_pfn;
 514	int ret = -EINVAL;
 515
 516	start = (u64) start_pfn << PAGE_SHIFT;
 517	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
 518	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 519	while (start < end &&
 520	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
 521		pfn = PFN_UP(res.start);
 522		end_pfn = PFN_DOWN(res.end + 1);
 523		if (end_pfn > pfn)
 524			ret = (*func)(pfn, end_pfn - pfn, arg);
 525		if (ret)
 526			break;
 527		start = res.end + 1;
 528	}
 529	return ret;
 530}
 531
 532static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
 533{
 534	return 1;
 535}
 536
 537/*
  538 * This generic page_is_ram() returns true if the specified address is
  539 * registered as System RAM in the iomem_resource list.
 540 */
 541int __weak page_is_ram(unsigned long pfn)
 542{
 543	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
 544}
 545EXPORT_SYMBOL_GPL(page_is_ram);
 546
 547static int __region_intersects(struct resource *parent, resource_size_t start,
 548			       size_t size, unsigned long flags,
 549			       unsigned long desc)
 550{
 551	struct resource res;
  552	int type = 0, other = 0;
 553	struct resource *p;
 554
 555	res.start = start;
 556	res.end = start + size - 1;
 557
 558	for (p = parent->child; p ; p = p->sibling) {
 559		bool is_type = (((p->flags & flags) == flags) &&
 560				((desc == IORES_DESC_NONE) ||
 561				 (desc == p->desc)));
 562
 563		if (resource_overlaps(p, &res))
 564			is_type ? type++ : other++;
 565	}
 566
 567	if (type == 0)
 568		return REGION_DISJOINT;
 569
 570	if (other == 0)
 571		return REGION_INTERSECTS;
 572
 573	return REGION_MIXED;
 574}
 575
 576/**
 577 * region_intersects() - determine intersection of region with known resources
 578 * @start: region start address
 579 * @size: size of region
 580 * @flags: flags of resource (in iomem_resource)
 581 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 582 *
 583 * Check if the specified region partially overlaps or fully eclipses a
 584 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 585 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 586 * return REGION_MIXED if the region overlaps @flags/@desc and another
 587 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 588 * and no other defined resource. Note that REGION_INTERSECTS is also
 589 * returned in the case when the specified region overlaps RAM and undefined
 590 * memory holes.
 591 *
  592 * region_intersects() is used by memory remapping functions to ensure
  593 * the user is not remapping RAM, and it is a vast speed-up over walking
  594 * through the resource table page by page.
 595 */
 596int region_intersects(resource_size_t start, size_t size, unsigned long flags,
 597		      unsigned long desc)
 598{
 599	int ret;
 600
 601	read_lock(&resource_lock);
 602	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
 603	read_unlock(&resource_lock);
 604
 605	return ret;
 606}
 607EXPORT_SYMBOL_GPL(region_intersects);
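
/*
 * Illustrative sketch (not part of the kernel source): a remapping helper
 * can use region_intersects() to refuse to remap System RAM, which is the
 * kind of check memremap() performs internally.
 */
static bool __maybe_unused range_is_system_ram(resource_size_t offset,
					       size_t size)
{
	return region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_INTERSECTS;
}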
 608
 609void __weak arch_remove_reservations(struct resource *avail)
 610{
 611}
 612
 613static resource_size_t simple_align_resource(void *data,
 614					     const struct resource *avail,
 615					     resource_size_t size,
 616					     resource_size_t align)
 617{
 618	return avail->start;
 619}
 620
 621static void resource_clip(struct resource *res, resource_size_t min,
 622			  resource_size_t max)
 623{
 624	if (res->start < min)
 625		res->start = min;
 626	if (res->end > max)
 627		res->end = max;
 628}
 629
 630/*
 631 * Find empty slot in the resource tree with the given range and
 632 * alignment constraints
 633 */
 634static int __find_resource(struct resource *root, struct resource *old,
 635			 struct resource *new,
 636			 resource_size_t  size,
 637			 struct resource_constraint *constraint)
 638{
 639	struct resource *this = root->child;
 640	struct resource tmp = *new, avail, alloc;
 641
 642	tmp.start = root->start;
 643	/*
 644	 * Skip past an allocated resource that starts at 0, since the assignment
 645	 * of this->start - 1 to tmp->end below would cause an underflow.
 646	 */
 647	if (this && this->start == root->start) {
 648		tmp.start = (this == old) ? old->start : this->end + 1;
 649		this = this->sibling;
 650	}
 651	for(;;) {
 652		if (this)
 653			tmp.end = (this == old) ?  this->end : this->start - 1;
 654		else
 655			tmp.end = root->end;
 656
 657		if (tmp.end < tmp.start)
 658			goto next;
 659
 660		resource_clip(&tmp, constraint->min, constraint->max);
 661		arch_remove_reservations(&tmp);
 662
 663		/* Check for overflow after ALIGN() */
 664		avail.start = ALIGN(tmp.start, constraint->align);
 665		avail.end = tmp.end;
 666		avail.flags = new->flags & ~IORESOURCE_UNSET;
 667		if (avail.start >= tmp.start) {
 668			alloc.flags = avail.flags;
 669			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
 670					size, constraint->align);
 671			alloc.end = alloc.start + size - 1;
 672			if (alloc.start <= alloc.end &&
 673			    resource_contains(&avail, &alloc)) {
 674				new->start = alloc.start;
 675				new->end = alloc.end;
 676				return 0;
 677			}
 678		}
 679
 680next:		if (!this || this->end == root->end)
 681			break;
 682
 683		if (this != old)
 684			tmp.start = this->end + 1;
 685		this = this->sibling;
 686	}
 687	return -EBUSY;
 688}
 689
 690/*
 691 * Find empty slot in the resource tree given range and alignment.
 692 */
 693static int find_resource(struct resource *root, struct resource *new,
 694			resource_size_t size,
 695			struct resource_constraint  *constraint)
 696{
 697	return  __find_resource(root, NULL, new, size, constraint);
 698}
 699
 700/**
 701 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
  702 *	The resource will be relocated if the new size cannot be accommodated in
  703 *	the current location.
 704 *
 705 * @root: root resource descriptor
 706 * @old:  resource descriptor desired by caller
 707 * @newsize: new size of the resource descriptor
 708 * @constraint: the size and alignment constraints to be met.
 709 */
 710static int reallocate_resource(struct resource *root, struct resource *old,
 711			       resource_size_t newsize,
 712			       struct resource_constraint *constraint)
 713{
  714	int err = 0;
 715	struct resource new = *old;
 716	struct resource *conflict;
 717
 718	write_lock(&resource_lock);
 719
 720	if ((err = __find_resource(root, old, &new, newsize, constraint)))
 721		goto out;
 722
 723	if (resource_contains(&new, old)) {
 724		old->start = new.start;
 725		old->end = new.end;
 726		goto out;
 727	}
 728
 729	if (old->child) {
 730		err = -EBUSY;
 731		goto out;
 732	}
 733
 734	if (resource_contains(old, &new)) {
 735		old->start = new.start;
 736		old->end = new.end;
 737	} else {
 738		__release_resource(old, true);
 739		*old = new;
 740		conflict = __request_resource(root, old);
 741		BUG_ON(conflict);
 742	}
 743out:
 744	write_unlock(&resource_lock);
 745	return err;
 746}
 747
 748
 749/**
  750 * allocate_resource - allocate an empty slot in the resource tree given range & alignment.
  751 * 	The resource will be reallocated with a new size if it was already allocated.
 752 * @root: root resource descriptor
 753 * @new: resource descriptor desired by caller
 754 * @size: requested resource region size
 755 * @min: minimum boundary to allocate
 756 * @max: maximum boundary to allocate
 757 * @align: alignment requested, in bytes
 758 * @alignf: alignment function, optional, called if not NULL
 759 * @alignf_data: arbitrary data to pass to the @alignf function
 760 */
 761int allocate_resource(struct resource *root, struct resource *new,
 762		      resource_size_t size, resource_size_t min,
 763		      resource_size_t max, resource_size_t align,
 764		      resource_size_t (*alignf)(void *,
 765						const struct resource *,
 766						resource_size_t,
 767						resource_size_t),
 768		      void *alignf_data)
 769{
 770	int err;
 771	struct resource_constraint constraint;
 772
 773	if (!alignf)
 774		alignf = simple_align_resource;
 775
 776	constraint.min = min;
 777	constraint.max = max;
 778	constraint.align = align;
 779	constraint.alignf = alignf;
 780	constraint.alignf_data = alignf_data;
 781
  782	if (new->parent) {
 783		/* resource is already allocated, try reallocating with
 784		   the new constraints */
 785		return reallocate_resource(root, new, size, &constraint);
 786	}
 787
 788	write_lock(&resource_lock);
 789	err = find_resource(root, new, size, &constraint);
 790	if (err >= 0 && __request_resource(root, new))
 791		err = -EBUSY;
 792	write_unlock(&resource_lock);
 793	return err;
 794}
 795
 796EXPORT_SYMBOL(allocate_resource);
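
/*
 * Illustrative sketch (not part of the kernel source): unlike
 * request_resource(), which claims a caller-chosen range, allocate_resource()
 * searches for a free slot itself. This hypothetical call asks for a 1 MiB,
 * 64 KiB-aligned window anywhere in the first 4 GiB, falling back to the
 * default alignment callback by passing a NULL @alignf.
 */
static int __maybe_unused example_alloc(struct resource *res)
{
	return allocate_resource(&iomem_resource, res, SZ_1M,
				 0, SZ_4G - 1, SZ_64K, NULL, NULL);
}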
 797
 798/**
 799 * lookup_resource - find an existing resource by a resource start address
 800 * @root: root resource descriptor
 801 * @start: resource start address
 802 *
 803 * Returns a pointer to the resource if found, NULL otherwise
 804 */
 805struct resource *lookup_resource(struct resource *root, resource_size_t start)
 806{
 807	struct resource *res;
 808
 809	read_lock(&resource_lock);
 810	for (res = root->child; res; res = res->sibling) {
 811		if (res->start == start)
 812			break;
 813	}
 814	read_unlock(&resource_lock);
 815
 816	return res;
 817}
 818
 819/*
 820 * Insert a resource into the resource tree. If successful, return NULL,
 821 * otherwise return the conflicting resource (compare to __request_resource())
 822 */
 823static struct resource * __insert_resource(struct resource *parent, struct resource *new)
 824{
 825	struct resource *first, *next;
 826
 827	for (;; parent = first) {
 828		first = __request_resource(parent, new);
 829		if (!first)
 830			return first;
 831
 832		if (first == parent)
 833			return first;
 834		if (WARN_ON(first == new))	/* duplicated insertion */
 835			return first;
 836
 837		if ((first->start > new->start) || (first->end < new->end))
 838			break;
 839		if ((first->start == new->start) && (first->end == new->end))
 840			break;
 841	}
 842
 843	for (next = first; ; next = next->sibling) {
 844		/* Partial overlap? Bad, and unfixable */
 845		if (next->start < new->start || next->end > new->end)
 846			return next;
 847		if (!next->sibling)
 848			break;
 849		if (next->sibling->start > new->end)
 850			break;
 851	}
 852
 853	new->parent = parent;
 854	new->sibling = next->sibling;
 855	new->child = first;
 856
 857	next->sibling = NULL;
 858	for (next = first; next; next = next->sibling)
 859		next->parent = new;
 860
 861	if (parent->child == first) {
 862		parent->child = new;
 863	} else {
 864		next = parent->child;
 865		while (next->sibling != first)
 866			next = next->sibling;
 867		next->sibling = new;
 868	}
 869	return NULL;
 870}
 871
 872/**
 873 * insert_resource_conflict - Inserts resource in the resource tree
 874 * @parent: parent of the new resource
 875 * @new: new resource to insert
 876 *
  877 * Returns NULL on success, or the conflicting resource if it can't be inserted.
 878 *
 879 * This function is equivalent to request_resource_conflict when no conflict
 880 * happens. If a conflict happens, and the conflicting resources
 881 * entirely fit within the range of the new resource, then the new
 882 * resource is inserted and the conflicting resources become children of
 883 * the new resource.
 884 *
 885 * This function is intended for producers of resources, such as FW modules
 886 * and bus drivers.
 887 */
 888struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
 889{
 890	struct resource *conflict;
 891
 892	write_lock(&resource_lock);
 893	conflict = __insert_resource(parent, new);
 894	write_unlock(&resource_lock);
 895	return conflict;
 896}
 897
 898/**
 899 * insert_resource - Inserts a resource in the resource tree
 900 * @parent: parent of the new resource
 901 * @new: new resource to insert
 902 *
 903 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 904 *
 905 * This function is intended for producers of resources, such as FW modules
 906 * and bus drivers.
 907 */
 908int insert_resource(struct resource *parent, struct resource *new)
 909{
 910	struct resource *conflict;
 911
 912	conflict = insert_resource_conflict(parent, new);
 913	return conflict ? -EBUSY : 0;
 914}
 915EXPORT_SYMBOL_GPL(insert_resource);
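
/*
 * Illustrative sketch (not part of the kernel source): in contrast to
 * request_resource(), a fully contained conflict does not fail
 * insert_resource() - the existing entries become children of the new
 * resource. Firmware code describing a bridge window over already-claimed
 * device ranges would use it like this (hypothetical addresses):
 */
static struct resource example_window = {
	.name	= "example-window",
	.start	= 0xe0000000,
	.end	= 0xefffffff,
	.flags	= IORESOURCE_MEM,
};

static int __maybe_unused example_insert(void)
{
	return insert_resource(&iomem_resource, &example_window);
}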
 916
 917/**
 918 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 919 * @root: root resource descriptor
 920 * @new: new resource to insert
 921 *
 922 * Insert a resource into the resource tree, possibly expanding it in order
 923 * to make it encompass any conflicting resources.
 924 */
 925void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
 926{
 927	if (new->parent)
 928		return;
 929
 930	write_lock(&resource_lock);
 931	for (;;) {
 932		struct resource *conflict;
 933
 934		conflict = __insert_resource(root, new);
 935		if (!conflict)
 936			break;
 937		if (conflict == root)
 938			break;
 939
 940		/* Ok, expand resource to cover the conflict, then try again .. */
 941		if (conflict->start < new->start)
 942			new->start = conflict->start;
 943		if (conflict->end > new->end)
 944			new->end = conflict->end;
 945
 946		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
 947	}
 948	write_unlock(&resource_lock);
 949}
 950/*
 951 * Not for general consumption, only early boot memory map parsing, PCI
 952 * resource discovery, and late discovery of CXL resources are expected
 953 * to use this interface. The former are built-in and only the latter,
 954 * CXL, is a module.
 955 */
 956EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);
 957
 958/**
 959 * remove_resource - Remove a resource in the resource tree
 960 * @old: resource to remove
 961 *
 962 * Returns 0 on success, -EINVAL if the resource is not valid.
 963 *
 964 * This function removes a resource previously inserted by insert_resource()
 965 * or insert_resource_conflict(), and moves the children (if any) up to
 966 * where they were before.  insert_resource() and insert_resource_conflict()
 967 * insert a new resource, and move any conflicting resources down to the
 968 * children of the new resource.
 969 *
 970 * insert_resource(), insert_resource_conflict() and remove_resource() are
 971 * intended for producers of resources, such as FW modules and bus drivers.
 972 */
 973int remove_resource(struct resource *old)
 974{
 975	int retval;
 976
 977	write_lock(&resource_lock);
 978	retval = __release_resource(old, false);
 979	write_unlock(&resource_lock);
 980	return retval;
 981}
 982EXPORT_SYMBOL_GPL(remove_resource);
 983
 984static int __adjust_resource(struct resource *res, resource_size_t start,
 985				resource_size_t size)
 986{
 987	struct resource *tmp, *parent = res->parent;
 988	resource_size_t end = start + size - 1;
 989	int result = -EBUSY;
 990
 991	if (!parent)
 992		goto skip;
 993
 994	if ((start < parent->start) || (end > parent->end))
 995		goto out;
 996
 997	if (res->sibling && (res->sibling->start <= end))
 998		goto out;
 999
1000	tmp = parent->child;
1001	if (tmp != res) {
1002		while (tmp->sibling != res)
1003			tmp = tmp->sibling;
1004		if (start <= tmp->end)
1005			goto out;
1006	}
1007
1008skip:
1009	for (tmp = res->child; tmp; tmp = tmp->sibling)
1010		if ((tmp->start < start) || (tmp->end > end))
1011			goto out;
1012
1013	res->start = start;
1014	res->end = end;
1015	result = 0;
1016
1017 out:
1018	return result;
1019}
1020
1021/**
1022 * adjust_resource - modify a resource's start and size
1023 * @res: resource to modify
1024 * @start: new start value
1025 * @size: new size
1026 *
1027 * Given an existing resource, change its start and size to match the
1028 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
1029 * Existing children of the resource are assumed to be immutable.
1030 */
1031int adjust_resource(struct resource *res, resource_size_t start,
1032		    resource_size_t size)
1033{
1034	int result;
1035
1036	write_lock(&resource_lock);
1037	result = __adjust_resource(res, start, size);
1038	write_unlock(&resource_lock);
1039	return result;
1040}
1041EXPORT_SYMBOL(adjust_resource);
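
/*
 * Illustrative sketch (not part of the kernel source): growing a resource
 * in place. The call fails with -EBUSY if the new range would collide with
 * a sibling or no longer contain the existing children.
 */
static int __maybe_unused example_grow(struct resource *res,
				       resource_size_t extra)
{
	return adjust_resource(res, res->start, resource_size(res) + extra);
}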
1042
1043static void __init
1044__reserve_region_with_split(struct resource *root, resource_size_t start,
1045			    resource_size_t end, const char *name)
1046{
1047	struct resource *parent = root;
1048	struct resource *conflict;
1049	struct resource *res = alloc_resource(GFP_ATOMIC);
1050	struct resource *next_res = NULL;
1051	int type = resource_type(root);
1052
1053	if (!res)
1054		return;
1055
1056	res->name = name;
1057	res->start = start;
1058	res->end = end;
1059	res->flags = type | IORESOURCE_BUSY;
1060	res->desc = IORES_DESC_NONE;
1061
1062	while (1) {
1063
1064		conflict = __request_resource(parent, res);
1065		if (!conflict) {
1066			if (!next_res)
1067				break;
1068			res = next_res;
1069			next_res = NULL;
1070			continue;
1071		}
1072
1073		/* conflict covered whole area */
1074		if (conflict->start <= res->start &&
1075				conflict->end >= res->end) {
1076			free_resource(res);
1077			WARN_ON(next_res);
1078			break;
1079		}
1080
1081		/* failed, split and try again */
1082		if (conflict->start > res->start) {
1083			end = res->end;
1084			res->end = conflict->start - 1;
1085			if (conflict->end < end) {
1086				next_res = alloc_resource(GFP_ATOMIC);
1087				if (!next_res) {
1088					free_resource(res);
1089					break;
1090				}
1091				next_res->name = name;
1092				next_res->start = conflict->end + 1;
1093				next_res->end = end;
1094				next_res->flags = type | IORESOURCE_BUSY;
1095				next_res->desc = IORES_DESC_NONE;
1096			}
1097		} else {
1098			res->start = conflict->end + 1;
1099		}
1100	}
1101
1102}
1103
1104void __init
1105reserve_region_with_split(struct resource *root, resource_size_t start,
1106			  resource_size_t end, const char *name)
1107{
1108	int abort = 0;
1109
1110	write_lock(&resource_lock);
1111	if (root->start > start || root->end < end) {
1112		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1113		       (unsigned long long)start, (unsigned long long)end,
1114		       root);
1115		if (start > root->end || end < root->start)
1116			abort = 1;
1117		else {
1118			if (end > root->end)
1119				end = root->end;
1120			if (start < root->start)
1121				start = root->start;
1122			pr_err("fixing request to [0x%llx-0x%llx]\n",
1123			       (unsigned long long)start,
1124			       (unsigned long long)end);
1125		}
1126		dump_stack();
1127	}
1128	if (!abort)
1129		__reserve_region_with_split(root, start, end, name);
1130	write_unlock(&resource_lock);
1131}
1132
1133/**
1134 * resource_alignment - calculate resource's alignment
1135 * @res: resource pointer
1136 *
1137 * Returns alignment on success, 0 (invalid alignment) on failure.
1138 */
1139resource_size_t resource_alignment(struct resource *res)
1140{
1141	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1142	case IORESOURCE_SIZEALIGN:
1143		return resource_size(res);
1144	case IORESOURCE_STARTALIGN:
1145		return res->start;
1146	default:
1147		return 0;
1148	}
1149}
1150
1151/*
1152 * This is compatibility stuff for IO resources.
1153 *
1154 * Note how this, unlike the above, knows about
1155 * the IO flag meanings (busy etc).
1156 *
1157 * request_region creates a new busy region.
1158 *
1159 * release_region releases a matching busy region.
1160 */
1161
1162static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1163
1164static struct inode *iomem_inode;
1165
1166#ifdef CONFIG_IO_STRICT_DEVMEM
1167static void revoke_iomem(struct resource *res)
1168{
1169	/* pairs with smp_store_release() in iomem_init_inode() */
1170	struct inode *inode = smp_load_acquire(&iomem_inode);
1171
1172	/*
1173	 * Check that the initialization has completed. Losing the race
1174	 * is ok because it means drivers are claiming resources before
 1175	 * the fs_initcall level of init, which prevents iomem_get_mapping()
 1176	 * users from establishing mappings.
1177	 */
1178	if (!inode)
1179		return;
1180
1181	/*
1182	 * The expectation is that the driver has successfully marked
1183	 * the resource busy by this point, so devmem_is_allowed()
1184	 * should start returning false, however for performance this
1185	 * does not iterate the entire resource range.
1186	 */
1187	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
1188	    devmem_is_allowed(PHYS_PFN(res->end))) {
1189		/*
1190		 * *cringe* iomem=relaxed says "go ahead, what's the
1191		 * worst that can happen?"
1192		 */
1193		return;
1194	}
1195
1196	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
1197}
1198#else
1199static void revoke_iomem(struct resource *res) {}
1200#endif
1201
1202struct address_space *iomem_get_mapping(void)
1203{
1204	/*
1205	 * This function is only called from file open paths, hence guaranteed
1206	 * that fs_initcalls have completed and no need to check for NULL. But
1207	 * since revoke_iomem can be called before the initcall we still need
1208	 * the barrier to appease checkers.
1209	 */
1210	return smp_load_acquire(&iomem_inode)->i_mapping;
1211}
1212
1213static int __request_region_locked(struct resource *res, struct resource *parent,
1214				   resource_size_t start, resource_size_t n,
1215				   const char *name, int flags)
1216{
1217	DECLARE_WAITQUEUE(wait, current);
1218
1219	res->name = name;
1220	res->start = start;
1221	res->end = start + n - 1;
1222
1223	for (;;) {
1224		struct resource *conflict;
1225
1226		res->flags = resource_type(parent) | resource_ext_type(parent);
1227		res->flags |= IORESOURCE_BUSY | flags;
1228		res->desc = parent->desc;
1229
1230		conflict = __request_resource(parent, res);
1231		if (!conflict)
1232			break;
1233		/*
1234		 * mm/hmm.c reserves physical addresses which then
1235		 * become unavailable to other users.  Conflicts are
1236		 * not expected.  Warn to aid debugging if encountered.
1237		 */
1238		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1239			pr_warn("Unaddressable device %s %pR conflicts with %pR",
1240				conflict->name, conflict, res);
1241		}
1242		if (conflict != parent) {
1243			if (!(conflict->flags & IORESOURCE_BUSY)) {
1244				parent = conflict;
1245				continue;
1246			}
1247		}
1248		if (conflict->flags & flags & IORESOURCE_MUXED) {
1249			add_wait_queue(&muxed_resource_wait, &wait);
1250			write_unlock(&resource_lock);
1251			set_current_state(TASK_UNINTERRUPTIBLE);
1252			schedule();
1253			remove_wait_queue(&muxed_resource_wait, &wait);
1254			write_lock(&resource_lock);
1255			continue;
1256		}
1257		/* Uhhuh, that didn't work out.. */
1258		return -EBUSY;
1259	}
1260
1261	return 0;
1262}
1263
1264/**
1265 * __request_region - create a new busy resource region
1266 * @parent: parent resource descriptor
1267 * @start: resource start address
1268 * @n: resource region size
1269 * @name: reserving caller's ID string
1270 * @flags: IO resource flags
1271 */
1272struct resource *__request_region(struct resource *parent,
1273				  resource_size_t start, resource_size_t n,
1274				  const char *name, int flags)
1275{
1276	struct resource *res = alloc_resource(GFP_KERNEL);
1277	int ret;
1278
1279	if (!res)
1280		return NULL;
1281
1282	write_lock(&resource_lock);
1283	ret = __request_region_locked(res, parent, start, n, name, flags);
1284	write_unlock(&resource_lock);
1285
1286	if (ret) {
1287		free_resource(res);
1288		return NULL;
1289	}
1290
1291	if (parent == &iomem_resource)
1292		revoke_iomem(res);
1293
1294	return res;
1295}
1296EXPORT_SYMBOL(__request_region);
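
/*
 * Illustrative sketch (not part of the kernel source): drivers normally
 * reach __request_region() through the request_region(),
 * request_mem_region() and request_muxed_region() wrappers from
 * <linux/ioport.h>. A typical claim/release pair (hypothetical range):
 */
static int __maybe_unused example_use_region(void)
{
	if (!request_mem_region(0xfed00000, 0x1000, "example-device"))
		return -EBUSY;

	/* ... access the hardware ... */

	release_mem_region(0xfed00000, 0x1000);
	return 0;
}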
1297
1298/**
1299 * __release_region - release a previously reserved resource region
1300 * @parent: parent resource descriptor
1301 * @start: resource start address
1302 * @n: resource region size
1303 *
1304 * The described resource region must match a currently busy region.
1305 */
1306void __release_region(struct resource *parent, resource_size_t start,
1307		      resource_size_t n)
1308{
1309	struct resource **p;
1310	resource_size_t end;
1311
1312	p = &parent->child;
1313	end = start + n - 1;
1314
1315	write_lock(&resource_lock);
1316
1317	for (;;) {
1318		struct resource *res = *p;
1319
1320		if (!res)
1321			break;
1322		if (res->start <= start && res->end >= end) {
1323			if (!(res->flags & IORESOURCE_BUSY)) {
1324				p = &res->child;
1325				continue;
1326			}
1327			if (res->start != start || res->end != end)
1328				break;
1329			*p = res->sibling;
1330			write_unlock(&resource_lock);
1331			if (res->flags & IORESOURCE_MUXED)
1332				wake_up(&muxed_resource_wait);
1333			free_resource(res);
1334			return;
1335		}
1336		p = &res->sibling;
1337	}
1338
1339	write_unlock(&resource_lock);
1340
1341	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
1342}
1343EXPORT_SYMBOL(__release_region);
1344
1345#ifdef CONFIG_MEMORY_HOTREMOVE
1346/**
1347 * release_mem_region_adjustable - release a previously reserved memory region
1348 * @start: resource start address
1349 * @size: resource region size
1350 *
1351 * This interface is intended for memory hot-delete.  The requested region
1352 * is released from a currently busy memory resource.  The requested region
1353 * must either match exactly or fit into a single busy resource entry.  In
1354 * the latter case, the remaining resource is adjusted accordingly.
1355 * Existing children of the busy memory resource must be immutable in the
1356 * request.
1357 *
1358 * Note:
1359 * - Additional release conditions, such as overlapping region, can be
1360 *   supported after they are confirmed as valid cases.
1361 * - When a busy memory resource gets split into two entries, the code
1362 *   assumes that all children remain in the lower address entry for
1363 *   simplicity.  Enhance this logic when necessary.
1364 */
1365void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
1366{
1367	struct resource *parent = &iomem_resource;
1368	struct resource *new_res = NULL;
1369	bool alloc_nofail = false;
1370	struct resource **p;
1371	struct resource *res;
1372	resource_size_t end;
1373
1374	end = start + size - 1;
1375	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
1376		return;
1377
1378	/*
 1379	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
 1380	 * just before releasing the region. This is highly unlikely to
 1381	 * fail - let's play safe and make it never fail, as the caller cannot
1382	 * perform any error handling (e.g., trying to re-add memory will fail
1383	 * similarly).
1384	 */
1385retry:
1386	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
1387
1388	p = &parent->child;
1389	write_lock(&resource_lock);
1390
1391	while ((res = *p)) {
1392		if (res->start >= end)
1393			break;
1394
 1395		/* look for the next resource if this one does not fit */
1396		if (res->start > start || res->end < end) {
1397			p = &res->sibling;
1398			continue;
1399		}
1400
1401		if (!(res->flags & IORESOURCE_MEM))
1402			break;
1403
1404		if (!(res->flags & IORESOURCE_BUSY)) {
1405			p = &res->child;
1406			continue;
1407		}
1408
1409		/* found the target resource; let's adjust accordingly */
1410		if (res->start == start && res->end == end) {
1411			/* free the whole entry */
1412			*p = res->sibling;
1413			free_resource(res);
1414		} else if (res->start == start && res->end != end) {
1415			/* adjust the start */
1416			WARN_ON_ONCE(__adjust_resource(res, end + 1,
1417						       res->end - end));
1418		} else if (res->start != start && res->end == end) {
1419			/* adjust the end */
1420			WARN_ON_ONCE(__adjust_resource(res, res->start,
1421						       start - res->start));
1422		} else {
1423			/* split into two entries - we need a new resource */
1424			if (!new_res) {
1425				new_res = alloc_resource(GFP_ATOMIC);
1426				if (!new_res) {
1427					alloc_nofail = true;
1428					write_unlock(&resource_lock);
1429					goto retry;
1430				}
1431			}
1432			new_res->name = res->name;
1433			new_res->start = end + 1;
1434			new_res->end = res->end;
1435			new_res->flags = res->flags;
1436			new_res->desc = res->desc;
1437			new_res->parent = res->parent;
1438			new_res->sibling = res->sibling;
1439			new_res->child = NULL;
1440
1441			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
1442							   start - res->start)))
1443				break;
1444			res->sibling = new_res;
1445			new_res = NULL;
1446		}
1447
1448		break;
1449	}
1450
1451	write_unlock(&resource_lock);
1452	free_resource(new_res);
1453}
1454#endif	/* CONFIG_MEMORY_HOTREMOVE */
1455
1456#ifdef CONFIG_MEMORY_HOTPLUG
1457static bool system_ram_resources_mergeable(struct resource *r1,
1458					   struct resource *r2)
1459{
1460	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
1461	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
1462	       r1->name == r2->name && r1->desc == r2->desc &&
1463	       !r1->child && !r2->child;
1464}
1465
1466/**
1467 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
1468 *	merge it with adjacent, mergeable resources
1469 * @res: resource descriptor
1470 *
1471 * This interface is intended for memory hotplug, whereby lots of contiguous
1472 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 1473 * the actual resource boundaries are not of interest (whereas they might be
 1474 * for, e.g., DIMMs). Only resources that are marked mergeable, that have the
1475 * same parent, and that don't have any children are considered. All mergeable
1476 * resources must be immutable during the request.
1477 *
1478 * Note:
1479 * - The caller has to make sure that no pointers to resources that are
1480 *   marked mergeable are used anymore after this call - the resource might
1481 *   be freed and the pointer might be stale!
1482 * - release_mem_region_adjustable() will split on demand on memory hotunplug
1483 */
1484void merge_system_ram_resource(struct resource *res)
1485{
1486	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1487	struct resource *cur;
1488
1489	if (WARN_ON_ONCE((res->flags & flags) != flags))
1490		return;
1491
1492	write_lock(&resource_lock);
1493	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
1494
1495	/* Try to merge with next item in the list. */
1496	cur = res->sibling;
1497	if (cur && system_ram_resources_mergeable(res, cur)) {
1498		res->end = cur->end;
1499		res->sibling = cur->sibling;
1500		free_resource(cur);
1501	}
1502
1503	/* Try to merge with previous item in the list. */
1504	cur = res->parent->child;
1505	while (cur && cur->sibling != res)
1506		cur = cur->sibling;
1507	if (cur && system_ram_resources_mergeable(cur, res)) {
1508		cur->end = res->end;
1509		cur->sibling = res->sibling;
1510		free_resource(res);
1511	}
1512	write_unlock(&resource_lock);
1513}
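
/*
 * Illustrative sketch (not part of the kernel source): a hotplug driver
 * that adds many contiguous blocks can mark each new System RAM resource
 * mergeable so the tree does not accumulate one entry per block. Note the
 * caveat above: after the call the resource may have been merged into a
 * neighbour and freed, so the pointer must not be used again.
 */
static void __maybe_unused example_mark_mergeable(struct resource *res)
{
	merge_system_ram_resource(res);
	/* @res may be stale from here on */
}
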
1514#endif	/* CONFIG_MEMORY_HOTPLUG */
1515
1516/*
1517 * Managed region resource
1518 */
1519static void devm_resource_release(struct device *dev, void *ptr)
1520{
1521	struct resource **r = ptr;
1522
1523	release_resource(*r);
1524}
1525
1526/**
1527 * devm_request_resource() - request and reserve an I/O or memory resource
1528 * @dev: device for which to request the resource
1529 * @root: root of the resource tree from which to request the resource
1530 * @new: descriptor of the resource to request
1531 *
1532 * This is a device-managed version of request_resource(). There is usually
1533 * no need to release resources requested by this function explicitly since
1534 * that will be taken care of when the device is unbound from its driver.
1535 * If for some reason the resource needs to be released explicitly, because
1536 * of ordering issues for example, drivers must call devm_release_resource()
1537 * rather than the regular release_resource().
1538 *
1539 * When a conflict is detected between any existing resources and the newly
1540 * requested resource, an error message will be printed.
1541 *
1542 * Returns 0 on success or a negative error code on failure.
1543 */
1544int devm_request_resource(struct device *dev, struct resource *root,
1545			  struct resource *new)
1546{
1547	struct resource *conflict, **ptr;
1548
1549	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1550	if (!ptr)
1551		return -ENOMEM;
1552
1553	*ptr = new;
1554
1555	conflict = request_resource_conflict(root, new);
1556	if (conflict) {
1557		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1558			new, conflict->name, conflict);
1559		devres_free(ptr);
1560		return -EBUSY;
1561	}
1562
1563	devres_add(dev, ptr);
1564	return 0;
1565}
1566EXPORT_SYMBOL(devm_request_resource);
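
/*
 * Illustrative sketch (not part of the kernel source): in a driver's probe
 * path the devm variant ties the reservation's lifetime to the device, so
 * nothing needs to be released explicitly on the error or unbind paths.
 */
static int __maybe_unused example_probe(struct device *dev,
					struct resource *res)
{
	int ret;

	ret = devm_request_resource(dev, &iomem_resource, res);
	if (ret)
		return ret;	/* the conflict was already logged */

	/* ... use the region; it is released automatically on unbind ... */
	return 0;
}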
1567
1568static int devm_resource_match(struct device *dev, void *res, void *data)
1569{
1570	struct resource **ptr = res;
1571
1572	return *ptr == data;
1573}
1574
1575/**
1576 * devm_release_resource() - release a previously requested resource
1577 * @dev: device for which to release the resource
1578 * @new: descriptor of the resource to release
1579 *
1580 * Releases a resource previously requested using devm_request_resource().
1581 */
1582void devm_release_resource(struct device *dev, struct resource *new)
1583{
1584	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1585			       new));
1586}
1587EXPORT_SYMBOL(devm_release_resource);
1588
1589struct region_devres {
1590	struct resource *parent;
1591	resource_size_t start;
1592	resource_size_t n;
1593};
1594
1595static void devm_region_release(struct device *dev, void *res)
1596{
1597	struct region_devres *this = res;
1598
1599	__release_region(this->parent, this->start, this->n);
1600}
1601
1602static int devm_region_match(struct device *dev, void *res, void *match_data)
1603{
1604	struct region_devres *this = res, *match = match_data;
1605
1606	return this->parent == match->parent &&
1607		this->start == match->start && this->n == match->n;
1608}
1609
1610struct resource *
1611__devm_request_region(struct device *dev, struct resource *parent,
1612		      resource_size_t start, resource_size_t n, const char *name)
1613{
1614	struct region_devres *dr = NULL;
1615	struct resource *res;
1616
1617	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1618			  GFP_KERNEL);
1619	if (!dr)
1620		return NULL;
1621
1622	dr->parent = parent;
1623	dr->start = start;
1624	dr->n = n;
1625
1626	res = __request_region(parent, start, n, name, 0);
1627	if (res)
1628		devres_add(dev, dr);
1629	else
1630		devres_free(dr);
1631
1632	return res;
1633}
1634EXPORT_SYMBOL(__devm_request_region);
1635
1636void __devm_release_region(struct device *dev, struct resource *parent,
1637			   resource_size_t start, resource_size_t n)
1638{
1639	struct region_devres match_data = { parent, start, n };
1640
1641	__release_region(parent, start, n);
1642	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1643			       &match_data));
1644}
1645EXPORT_SYMBOL(__devm_release_region);
1646
1647/*
1648 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1649 */
1650#define MAXRESERVE 4
1651static int __init reserve_setup(char *str)
1652{
1653	static int reserved;
1654	static struct resource reserve[MAXRESERVE];
1655
1656	for (;;) {
1657		unsigned int io_start, io_num;
1658		int x = reserved;
1659		struct resource *parent;
1660
1661		if (get_option(&str, &io_start) != 2)
1662			break;
1663		if (get_option(&str, &io_num) == 0)
1664			break;
1665		if (x < MAXRESERVE) {
1666			struct resource *res = reserve + x;
1667
1668			/*
1669			 * If the region starts below 0x10000, we assume it's
1670			 * I/O port space; otherwise assume it's memory.
1671			 */
1672			if (io_start < 0x10000) {
1673				res->flags = IORESOURCE_IO;
1674				parent = &ioport_resource;
1675			} else {
1676				res->flags = IORESOURCE_MEM;
1677				parent = &iomem_resource;
1678			}
1679			res->name = "reserved";
1680			res->start = io_start;
1681			res->end = io_start + io_num - 1;
1682			res->flags |= IORESOURCE_BUSY;
1683			res->desc = IORES_DESC_NONE;
1684			res->child = NULL;
1685			if (request_resource(parent, res) == 0)
1686				reserved = x+1;
1687		}
1688	}
1689	return 1;
1690}
1691__setup("reserve=", reserve_setup);
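
/*
 * Illustrative usage (not part of the kernel source): booting with
 * "reserve=0x300,8" marks I/O ports 0x300-0x307 busy, while
 * "reserve=0xfed00000,0x1000" reserves a memory range, because values at
 * or above 0x10000 are treated as memory by the heuristic above.
 */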
1692
1693/*
 1694 * Check if the requested addr and size span more than any single slot in
 1695 * the iomem resource tree.
1696 */
1697int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1698{
1699	resource_size_t end = addr + size - 1;
1700	struct resource *p;
1701	int err = 0;
1702
1703	read_lock(&resource_lock);
1704	for_each_resource(&iomem_resource, p, false) {
1705		/*
1706		 * We can probably skip the resources without
1707		 * IORESOURCE_IO attribute?
1708		 */
1709		if (p->start > end)
1710			continue;
1711		if (p->end < addr)
1712			continue;
1713		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1714		    PFN_DOWN(p->end) >= PFN_DOWN(end))
1715			continue;
1716		/*
1717		 * if a resource is "BUSY", it's not a hardware resource
1718		 * but a driver mapping of such a resource; we don't want
1719		 * to warn for those; some drivers legitimately map only
1720		 * partial hardware resources. (example: vesafb)
1721		 */
1722		if (p->flags & IORESOURCE_BUSY)
1723			continue;
1724
1725		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1726			&addr, &end, p->name, p);
1727		err = -1;
1728		break;
1729	}
1730	read_unlock(&resource_lock);
1731
1732	return err;
1733}
1734
1735#ifdef CONFIG_STRICT_DEVMEM
1736static int strict_iomem_checks = 1;
1737#else
1738static int strict_iomem_checks;
1739#endif
1740
1741/*
1742 * Check if an address is exclusive to the kernel and must not be mapped to
1743 * user space, for example, via /dev/mem.
1744 *
1745 * Returns true if exclusive to the kernel, otherwise returns false.
1746 */
1747bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1748{
1749	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1750						  IORESOURCE_EXCLUSIVE;
1751	bool skip_children = false, err = false;
1752	struct resource *p;
1753
1754	read_lock(&resource_lock);
1755	for_each_resource(root, p, skip_children) {
1756		if (p->start >= addr + size)
1757			break;
1758		if (p->end < addr) {
1759			skip_children = true;
1760			continue;
1761		}
1762		skip_children = false;
1763
1764		/*
1765		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1766		 * IORESOURCE_EXCLUSIVE is set, even if they
1767		 * are not busy and even if "iomem=relaxed" is set. The
1768		 * responsible driver dynamically adds/removes system RAM within
1769		 * such an area and uncontrolled access is dangerous.
1770		 */
1771		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1772			err = true;
1773			break;
1774		}
1775
1776		/*
1777		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
1778		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
1779		 * resource is busy.
1780		 */
1781		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1782			continue;
1783		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1784				|| p->flags & IORESOURCE_EXCLUSIVE) {
1785			err = true;
1786			break;
1787		}
1788	}
1789	read_unlock(&resource_lock);
1790
1791	return err;
1792}
1793
1794bool iomem_is_exclusive(u64 addr)
1795{
1796	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1797				     PAGE_SIZE);
1798}
1799
1800struct resource_entry *resource_list_create_entry(struct resource *res,
1801						  size_t extra_size)
1802{
1803	struct resource_entry *entry;
1804
1805	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1806	if (entry) {
1807		INIT_LIST_HEAD(&entry->node);
1808		entry->res = res ? res : &entry->__res;
1809	}
1810
1811	return entry;
1812}
1813EXPORT_SYMBOL(resource_list_create_entry);
1814
1815void resource_list_free(struct list_head *head)
1816{
1817	struct resource_entry *entry, *tmp;
1818
1819	list_for_each_entry_safe(entry, tmp, head, node)
1820		resource_list_destroy_entry(entry);
1821}
1822EXPORT_SYMBOL(resource_list_free);
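
/*
 * Illustrative sketch (not part of the kernel source): resource lists are
 * how PCI host-bridge and ACPI code pass sets of windows around. A
 * hypothetical producer appends entries and the final owner tears the
 * whole list down with resource_list_free().
 */
static int __maybe_unused example_add_window(struct list_head *head,
					     struct resource *res)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry)
		return -ENOMEM;

	resource_list_add_tail(entry, head);
	return 0;
}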
1823
1824#ifdef CONFIG_GET_FREE_REGION
1825#define GFR_DESCENDING		(1UL << 0)
1826#define GFR_REQUEST_REGION	(1UL << 1)
1827#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
1828
1829static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1830				 resource_size_t align, unsigned long flags)
1831{
1832	if (flags & GFR_DESCENDING) {
1833		resource_size_t end;
1834
1835		end = min_t(resource_size_t, base->end,
1836			    (1ULL << MAX_PHYSMEM_BITS) - 1);
1837		return end - size + 1;
1838	}
1839
1840	return ALIGN(base->start, align);
1841}
1842
1843static bool gfr_continue(struct resource *base, resource_size_t addr,
1844			 resource_size_t size, unsigned long flags)
1845{
1846	if (flags & GFR_DESCENDING)
1847		return addr > size && addr >= base->start;
1848	/*
 1849	 * In the ascending case, be careful that the last increment by
 1850	 * @size did not wrap around 0.
1851	 */
1852	return addr > addr - size &&
1853	       addr <= min_t(resource_size_t, base->end,
1854			     (1ULL << MAX_PHYSMEM_BITS) - 1);
1855}
1856
1857static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1858				unsigned long flags)
1859{
1860	if (flags & GFR_DESCENDING)
1861		return addr - size;
1862	return addr + size;
1863}
1864
1865static void remove_free_mem_region(void *_res)
1866{
1867	struct resource *res = _res;
1868
1869	if (res->parent)
1870		remove_resource(res);
1871	free_resource(res);
1872}
1873
1874static struct resource *
1875get_free_mem_region(struct device *dev, struct resource *base,
1876		    resource_size_t size, const unsigned long align,
1877		    const char *name, const unsigned long desc,
1878		    const unsigned long flags)
1879{
1880	resource_size_t addr;
1881	struct resource *res;
1882	struct region_devres *dr = NULL;
1883
1884	size = ALIGN(size, align);
1885
1886	res = alloc_resource(GFP_KERNEL);
1887	if (!res)
1888		return ERR_PTR(-ENOMEM);
1889
1890	if (dev && (flags & GFR_REQUEST_REGION)) {
1891		dr = devres_alloc(devm_region_release,
1892				sizeof(struct region_devres), GFP_KERNEL);
1893		if (!dr) {
1894			free_resource(res);
1895			return ERR_PTR(-ENOMEM);
1896		}
1897	} else if (dev) {
1898		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
1899			return ERR_PTR(-ENOMEM);
1900	}
1901
1902	write_lock(&resource_lock);
1903	for (addr = gfr_start(base, size, align, flags);
1904	     gfr_continue(base, addr, align, flags);
1905	     addr = gfr_next(addr, align, flags)) {
1906		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
1907		    REGION_DISJOINT)
1908			continue;
1909
1910		if (flags & GFR_REQUEST_REGION) {
1911			if (__request_region_locked(res, &iomem_resource, addr,
1912						    size, name, 0))
1913				break;
1914
1915			if (dev) {
1916				dr->parent = &iomem_resource;
1917				dr->start = addr;
1918				dr->n = size;
1919				devres_add(dev, dr);
1920			}
1921
1922			res->desc = desc;
1923			write_unlock(&resource_lock);
1924
1926			/*
1927			 * A driver is claiming this region so revoke any
1928			 * mappings.
1929			 */
1930			revoke_iomem(res);
1931		} else {
1932			res->start = addr;
1933			res->end = addr + size - 1;
1934			res->name = name;
1935			res->desc = desc;
1936			res->flags = IORESOURCE_MEM;
1937
1938			/*
1939			 * Only succeed if the resource hosts an exclusive
1940			 * range after the insert
1941			 */
1942			if (__insert_resource(base, res) || res->child)
1943				break;
1944
1945			write_unlock(&resource_lock);
1946		}
1947
1948		return res;
1949	}
1950	write_unlock(&resource_lock);
1951
1952	if (flags & GFR_REQUEST_REGION) {
1953		free_resource(res);
1954		devres_free(dr);
1955	} else if (dev)
1956		devm_release_action(dev, remove_free_mem_region, res);
1957
1958	return ERR_PTR(-ERANGE);
1959}
1960
1961/**
1962 * devm_request_free_mem_region - find free region for device private memory
1963 *
1964 * @dev: device struct to bind the resource to
1965 * @base: resource tree to look in
1966 * @size: size in bytes of the device memory to add
1967 *
1968 * This function tries to find an empty range of physical address space big
1969 * enough to contain the new resource, so that it can later be hotplugged as
1970 * ZONE_DEVICE memory, which in turn allocates struct pages.
1971 */
1972struct resource *devm_request_free_mem_region(struct device *dev,
1973		struct resource *base, unsigned long size)
1974{
1975	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
1976
1977	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
1978				   dev_name(dev),
1979				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
1980}
1981EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
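
/*
 * Illustrative sketch (not part of the original file): the typical
 * device-private memory flow pairs this with devm_memremap_pages().
 * The pagemap setup below is abbreviated; pgmap is assumed to be
 * allocated elsewhere by the driver.
 *
 *	struct dev_pagemap *pgmap;	// driver-allocated elsewhere
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_1G);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	// ... followed by devm_memremap_pages(dev, pgmap);
 */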
1982
1983struct resource *request_free_mem_region(struct resource *base,
1984		unsigned long size, const char *name)
1985{
1986	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
1987
1988	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
1989				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
1990}
1991EXPORT_SYMBOL_GPL(request_free_mem_region);
1992
1993/**
1994 * alloc_free_mem_region - find a free region relative to @base
1995 * @base: resource that will parent the new resource
1996 * @size: size in bytes of memory to allocate from @base
1997 * @align: alignment requirements for the allocation
1998 * @name: resource name
1999 *
2000 * Buses like CXL, which can dynamically instantiate new memory regions,
2001 * need a method to allocate physical address space for those regions.
2002 * Allocate and insert a new resource to cover a free range in the span
2003 * of @base that is not already claimed by any descendant of @base.
2004 */
2005struct resource *alloc_free_mem_region(struct resource *base,
2006				       unsigned long size, unsigned long align,
2007				       const char *name)
2008{
2009	/* Default of ascending direction and insert resource */
2010	unsigned long flags = 0;
2011
2012	return get_free_mem_region(NULL, base, size, align, name,
2013				   IORES_DESC_NONE, flags);
2014}
2015EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
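
/*
 * Illustrative sketch (not part of the original file): a CXL-style
 * caller carving a region out of a root decoder's window. The
 * cxlrd_res parent resource named below is an assumption.
 *
 *	struct resource *res;
 *
 *	res = alloc_free_mem_region(cxlrd_res, SZ_256M, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	// res now parents [res->start, res->end] under cxlrd_res and is
 *	// released later via remove_resource() (teardown details omitted).
 */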
2016#endif /* CONFIG_GET_FREE_REGION */
2017
2018static int __init strict_iomem(char *str)
2019{
2020	if (strstr(str, "relaxed"))
2021		strict_iomem_checks = 0;
2022	if (strstr(str, "strict"))
2023		strict_iomem_checks = 1;
2024	return 1;
2025}
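
/*
 * Usage note (added for illustration): this parses the "iomem=" boot
 * parameter registered below, e.g. "iomem=relaxed" on the kernel
 * command line disables the strict /dev/mem checks and "iomem=strict"
 * re-enables them.
 */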
2026
2027static int iomem_fs_init_fs_context(struct fs_context *fc)
2028{
2029	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
2030}
2031
2032static struct file_system_type iomem_fs_type = {
2033	.name		= "iomem",
2034	.owner		= THIS_MODULE,
2035	.init_fs_context = iomem_fs_init_fs_context,
2036	.kill_sb	= kill_anon_super,
2037};
2038
2039static int __init iomem_init_inode(void)
2040{
2041	static struct vfsmount *iomem_vfs_mount;
2042	static int iomem_fs_cnt;
2043	struct inode *inode;
2044	int rc;
2045
2046	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
2047	if (rc < 0) {
2048		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
2049		return rc;
2050	}
2051
2052	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
2053	if (IS_ERR(inode)) {
2054		rc = PTR_ERR(inode);
2055		pr_err("Cannot allocate inode for iomem: %d\n", rc);
2056		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
2057		return rc;
2058	}
2059
2060	/*
2061	 * Publish the now-initialized iomem revocation inode.
2062	 * Pairs with smp_load_acquire() in revoke_iomem().
2063	 */
2064	smp_store_release(&iomem_inode, inode);
2065
2066	return 0;
2067}
2068
2069fs_initcall(iomem_init_inode);
2070
2071__setup("iomem=", strict_iomem);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	linux/kernel/resource.c
   4 *
   5 * Copyright (C) 1999	Linus Torvalds
   6 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
   7 *
   8 * Arbitrary resource management.
   9 */
  10
  11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12
  13#include <linux/export.h>
  14#include <linux/errno.h>
  15#include <linux/ioport.h>
  16#include <linux/init.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19#include <linux/fs.h>
  20#include <linux/proc_fs.h>
  21#include <linux/pseudo_fs.h>
  22#include <linux/sched.h>
  23#include <linux/seq_file.h>
  24#include <linux/device.h>
  25#include <linux/pfn.h>
  26#include <linux/mm.h>
  27#include <linux/mount.h>
  28#include <linux/resource_ext.h>
  29#include <uapi/linux/magic.h>
 
 
  30#include <asm/io.h>
  31
  32
  33struct resource ioport_resource = {
  34	.name	= "PCI IO",
  35	.start	= 0,
  36	.end	= IO_SPACE_LIMIT,
  37	.flags	= IORESOURCE_IO,
  38};
  39EXPORT_SYMBOL(ioport_resource);
  40
  41struct resource iomem_resource = {
  42	.name	= "PCI mem",
  43	.start	= 0,
  44	.end	= -1,
  45	.flags	= IORESOURCE_MEM,
  46};
  47EXPORT_SYMBOL(iomem_resource);
  48
  49/* constraints to be met while allocating resources */
  50struct resource_constraint {
  51	resource_size_t min, max, align;
  52	resource_size_t (*alignf)(void *, const struct resource *,
  53			resource_size_t, resource_size_t);
  54	void *alignf_data;
  55};
  56
  57static DEFINE_RWLOCK(resource_lock);
  58
  59static struct resource *next_resource(struct resource *p)
  60{
  61	if (p->child)
  62		return p->child;
  63	while (!p->sibling && p->parent)
  64		p = p->parent;
  65	return p->sibling;
  66}
  67
  68static struct resource *next_resource_skip_children(struct resource *p)
  69{
  70	while (!p->sibling && p->parent)
  71		p = p->parent;
  72	return p->sibling;
  73}
  74
  75#define for_each_resource(_root, _p, _skip_children) \
  76	for ((_p) = (_root)->child; (_p); \
  77	     (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
  78				       next_resource(_p))
  79
  80static void *r_next(struct seq_file *m, void *v, loff_t *pos)
  81{
  82	struct resource *p = v;
  83	(*pos)++;
  84	return (void *)next_resource(p);
  85}
  86
  87#ifdef CONFIG_PROC_FS
  88
  89enum { MAX_IORES_LEVEL = 5 };
  90
  91static void *r_start(struct seq_file *m, loff_t *pos)
  92	__acquires(resource_lock)
  93{
  94	struct resource *p = pde_data(file_inode(m->file));
  95	loff_t l = 0;
 
 
  96	read_lock(&resource_lock);
  97	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
  98		;
 
 
 
  99	return p;
 100}
 101
 
 
 
 
 
 
 
 
 
 102static void r_stop(struct seq_file *m, void *v)
 103	__releases(resource_lock)
 104{
 105	read_unlock(&resource_lock);
 106}
 107
 108static int r_show(struct seq_file *m, void *v)
 109{
 110	struct resource *root = pde_data(file_inode(m->file));
 111	struct resource *r = v, *p;
 112	unsigned long long start, end;
 113	int width = root->end < 0x10000 ? 4 : 8;
 114	int depth;
 115
 116	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
 117		if (p->parent == root)
 118			break;
 119
 120	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
 121		start = r->start;
 122		end = r->end;
 123	} else {
 124		start = end = 0;
 125	}
 126
 127	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
 128			depth * 2, "",
 129			width, start,
 130			width, end,
 131			r->name ? r->name : "<BAD>");
 132	return 0;
 133}
 134
 135static const struct seq_operations resource_op = {
 136	.start	= r_start,
 137	.next	= r_next,
 138	.stop	= r_stop,
 139	.show	= r_show,
 140};
 141
 142static int __init ioresources_init(void)
 143{
 144	proc_create_seq_data("ioports", 0, NULL, &resource_op,
 145			&ioport_resource);
 146	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
 147	return 0;
 148}
 149__initcall(ioresources_init);
 150
 151#endif /* CONFIG_PROC_FS */
 152
 153static void free_resource(struct resource *res)
 154{
 155	/**
 156	 * If the resource was allocated using memblock early during boot
 157	 * we'll leak it here: we can only return full pages back to the
 158	 * buddy and trying to be smart and reusing them eventually in
 159	 * alloc_resource() overcomplicates resource handling.
 160	 */
 161	if (res && PageSlab(virt_to_head_page(res)))
 162		kfree(res);
 163}
 164
 165static struct resource *alloc_resource(gfp_t flags)
 166{
 167	return kzalloc(sizeof(struct resource), flags);
 168}
 169
 170/* Return the conflict entry if you can't request it */
 171static struct resource * __request_resource(struct resource *root, struct resource *new)
 172{
 173	resource_size_t start = new->start;
 174	resource_size_t end = new->end;
 175	struct resource *tmp, **p;
 176
 177	if (end < start)
 178		return root;
 179	if (start < root->start)
 180		return root;
 181	if (end > root->end)
 182		return root;
 183	p = &root->child;
 184	for (;;) {
 185		tmp = *p;
 186		if (!tmp || tmp->start > end) {
 187			new->sibling = tmp;
 188			*p = new;
 189			new->parent = root;
 190			return NULL;
 191		}
 192		p = &tmp->sibling;
 193		if (tmp->end < start)
 194			continue;
 195		return tmp;
 196	}
 197}
 198
 199static int __release_resource(struct resource *old, bool release_child)
 200{
 201	struct resource *tmp, **p, *chd;
 202
 203	p = &old->parent->child;
 204	for (;;) {
 205		tmp = *p;
 206		if (!tmp)
 207			break;
 208		if (tmp == old) {
 209			if (release_child || !(tmp->child)) {
 210				*p = tmp->sibling;
 211			} else {
 212				for (chd = tmp->child;; chd = chd->sibling) {
 213					chd->parent = tmp->parent;
 214					if (!(chd->sibling))
 215						break;
 216				}
 217				*p = tmp->child;
 218				chd->sibling = tmp->sibling;
 219			}
 220			old->parent = NULL;
 221			return 0;
 222		}
 223		p = &tmp->sibling;
 224	}
 225	return -EINVAL;
 226}
 227
 228static void __release_child_resources(struct resource *r)
 229{
 230	struct resource *tmp, *p;
 231	resource_size_t size;
 232
 233	p = r->child;
 234	r->child = NULL;
 235	while (p) {
 236		tmp = p;
 237		p = p->sibling;
 238
 239		tmp->parent = NULL;
 240		tmp->sibling = NULL;
 241		__release_child_resources(tmp);
 242
 243		printk(KERN_DEBUG "release child resource %pR\n", tmp);
 244		/* need to restore size, and keep flags */
 245		size = resource_size(tmp);
 246		tmp->start = 0;
 247		tmp->end = size - 1;
 248	}
 249}
 250
 251void release_child_resources(struct resource *r)
 252{
 253	write_lock(&resource_lock);
 254	__release_child_resources(r);
 255	write_unlock(&resource_lock);
 256}
 257
 258/**
 259 * request_resource_conflict - request and reserve an I/O or memory resource
 260 * @root: root resource descriptor
 261 * @new: resource descriptor desired by caller
 262 *
 263 * Returns 0 for success, conflict resource on error.
 264 */
 265struct resource *request_resource_conflict(struct resource *root, struct resource *new)
 266{
 267	struct resource *conflict;
 268
 269	write_lock(&resource_lock);
 270	conflict = __request_resource(root, new);
 271	write_unlock(&resource_lock);
 272	return conflict;
 273}
 274
 275/**
 276 * request_resource - request and reserve an I/O or memory resource
 277 * @root: root resource descriptor
 278 * @new: resource descriptor desired by caller
 279 *
 280 * Returns 0 for success, negative error code on error.
 281 */
 282int request_resource(struct resource *root, struct resource *new)
 283{
 284	struct resource *conflict;
 285
 286	conflict = request_resource_conflict(root, new);
 287	return conflict ? -EBUSY : 0;
 288}
 289
 290EXPORT_SYMBOL(request_resource);
 291
 292/**
 293 * release_resource - release a previously reserved resource
 294 * @old: resource pointer
 295 */
 296int release_resource(struct resource *old)
 297{
 298	int retval;
 299
 300	write_lock(&resource_lock);
 301	retval = __release_resource(old, true);
 302	write_unlock(&resource_lock);
 303	return retval;
 304}
 305
 306EXPORT_SYMBOL(release_resource);
 307
 308/**
 309 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 310 *			 [@start..@end].
 311 *
 312 * If a resource is found, returns 0 and @*res is overwritten with the part
 313 * of the resource that's within [@start..@end]; if none is found, returns
 314 * -ENODEV.  Returns -EINVAL for invalid parameters.
 315 *
 316 * @start:	start address of the resource searched for
 317 * @end:	end address of same resource
 318 * @flags:	flags which the resource must have
 319 * @desc:	descriptor the resource must have
 320 * @res:	return ptr, if resource found
 321 *
 322 * The caller must specify @start, @end, @flags, and @desc
 323 * (which may be IORES_DESC_NONE).
 324 */
 325static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 326			       unsigned long flags, unsigned long desc,
 327			       struct resource *res)
 328{
 329	struct resource *p;
 330
 331	if (!res)
 332		return -EINVAL;
 333
 334	if (start >= end)
 335		return -EINVAL;
 336
 337	read_lock(&resource_lock);
 338
 339	for (p = iomem_resource.child; p; p = next_resource(p)) {
 340		/* If we passed the resource we are looking for, stop */
 341		if (p->start > end) {
 342			p = NULL;
 343			break;
 344		}
 345
 346		/* Skip until we find a range that matches what we look for */
 347		if (p->end < start)
 348			continue;
 349
 350		if ((p->flags & flags) != flags)
 351			continue;
 352		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
 353			continue;
 354
 355		/* Found a match, break */
 356		break;
 357	}
 358
 359	if (p) {
 360		/* copy data */
 361		*res = (struct resource) {
 362			.start = max(start, p->start),
 363			.end = min(end, p->end),
 364			.flags = p->flags,
 365			.desc = p->desc,
 366			.parent = p->parent,
 367		};
 368	}
 369
 370	read_unlock(&resource_lock);
 371	return p ? 0 : -ENODEV;
 372}
 373
 374static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
 375				 unsigned long flags, unsigned long desc,
 376				 void *arg,
 377				 int (*func)(struct resource *, void *))
 378{
 379	struct resource res;
 380	int ret = -EINVAL;
 381
 382	while (start < end &&
 383	       !find_next_iomem_res(start, end, flags, desc, &res)) {
 384		ret = (*func)(&res, arg);
 385		if (ret)
 386			break;
 387
 388		start = res.end + 1;
 389	}
 390
 391	return ret;
 392}
 393
 394/**
 395 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 396 *			 with matching resource ranges.
 397 * *
 398 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 399 * @flags: I/O resource flags
 400 * @start: start addr
 401 * @end: end addr
 402 * @arg: function argument for the callback @func
 403 * @func: callback function that is called for each qualifying resource area
 404 *
 405 * All the memory ranges which overlap start,end and also match flags and
 406 * desc are valid candidates.
 407 *
 408 * NOTE: For a new descriptor search, define a new IORES_DESC in
 409 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 410 */
 411int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
 412		u64 end, void *arg, int (*func)(struct resource *, void *))
 413{
 414	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
 415}
 416EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 417
 418/*
 419 * This function calls the @func callback against all memory ranges of type
 420 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY.
 421 * Now, this function is only for System RAM, it deals with full ranges and
 422 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 423 * ranges.
 424 */
 425int walk_system_ram_res(u64 start, u64 end, void *arg,
 426			int (*func)(struct resource *, void *))
 427{
 428	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 429
 430	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
 431				     func);
 432}
 433
 434/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 435 * This function calls the @func callback against all memory ranges, which
 436 * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
 437 */
 438int walk_mem_res(u64 start, u64 end, void *arg,
 439		 int (*func)(struct resource *, void *))
 440{
 441	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 442
 443	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
 444				     func);
 445}
 446
 447/*
 448 * This function calls the @func callback against all memory ranges of type
 449 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY.
 450 * It is to be used only for System RAM.
 451 */
 452int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 453			  void *arg, int (*func)(unsigned long, unsigned long, void *))
 454{
 455	resource_size_t start, end;
 456	unsigned long flags;
 457	struct resource res;
 458	unsigned long pfn, end_pfn;
 459	int ret = -EINVAL;
 460
 461	start = (u64) start_pfn << PAGE_SHIFT;
 462	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
 463	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 464	while (start < end &&
 465	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
 466		pfn = PFN_UP(res.start);
 467		end_pfn = PFN_DOWN(res.end + 1);
 468		if (end_pfn > pfn)
 469			ret = (*func)(pfn, end_pfn - pfn, arg);
 470		if (ret)
 471			break;
 472		start = res.end + 1;
 473	}
 474	return ret;
 475}
 476
 477static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
 478{
 479	return 1;
 480}
 481
 482/*
 483 * This generic page_is_ram() returns true if specified address is
 484 * registered as System RAM in iomem_resource list.
 485 */
 486int __weak page_is_ram(unsigned long pfn)
 487{
 488	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
 489}
 490EXPORT_SYMBOL_GPL(page_is_ram);
 491
 492static int __region_intersects(struct resource *parent, resource_size_t start,
 493			       size_t size, unsigned long flags,
 494			       unsigned long desc)
 495{
 496	struct resource res;
 497	int type = 0; int other = 0;
 498	struct resource *p;
 499
 500	res.start = start;
 501	res.end = start + size - 1;
 502
 503	for (p = parent->child; p ; p = p->sibling) {
 504		bool is_type = (((p->flags & flags) == flags) &&
 505				((desc == IORES_DESC_NONE) ||
 506				 (desc == p->desc)));
 507
 508		if (resource_overlaps(p, &res))
 509			is_type ? type++ : other++;
 510	}
 511
 512	if (type == 0)
 513		return REGION_DISJOINT;
 514
 515	if (other == 0)
 516		return REGION_INTERSECTS;
 517
 518	return REGION_MIXED;
 519}
 520
 521/**
 522 * region_intersects() - determine intersection of region with known resources
 523 * @start: region start address
 524 * @size: size of region
 525 * @flags: flags of resource (in iomem_resource)
 526 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 527 *
 528 * Check if the specified region partially overlaps or fully eclipses a
 529 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 530 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 531 * return REGION_MIXED if the region overlaps @flags/@desc and another
 532 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 533 * and no other defined resource. Note that REGION_INTERSECTS is also
 534 * returned in the case when the specified region overlaps RAM and undefined
 535 * memory holes.
 536 *
 537 * region_intersect() is used by memory remapping functions to ensure
 538 * the user is not remapping RAM and is a vast speed up over walking
 539 * through the resource table page by page.
 540 */
 541int region_intersects(resource_size_t start, size_t size, unsigned long flags,
 542		      unsigned long desc)
 543{
 544	int ret;
 545
 546	read_lock(&resource_lock);
 547	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
 548	read_unlock(&resource_lock);
 549
 550	return ret;
 551}
 552EXPORT_SYMBOL_GPL(region_intersects);
 553
 554void __weak arch_remove_reservations(struct resource *avail)
 555{
 556}
 557
 558static resource_size_t simple_align_resource(void *data,
 559					     const struct resource *avail,
 560					     resource_size_t size,
 561					     resource_size_t align)
 562{
 563	return avail->start;
 564}
 565
 566static void resource_clip(struct resource *res, resource_size_t min,
 567			  resource_size_t max)
 568{
 569	if (res->start < min)
 570		res->start = min;
 571	if (res->end > max)
 572		res->end = max;
 573}
 574
 575/*
 576 * Find empty slot in the resource tree with the given range and
 577 * alignment constraints
 578 */
 579static int __find_resource(struct resource *root, struct resource *old,
 580			 struct resource *new,
 581			 resource_size_t  size,
 582			 struct resource_constraint *constraint)
 583{
 584	struct resource *this = root->child;
 585	struct resource tmp = *new, avail, alloc;
 586
 587	tmp.start = root->start;
 588	/*
 589	 * Skip past an allocated resource that starts at 0, since the assignment
 590	 * of this->start - 1 to tmp->end below would cause an underflow.
 591	 */
 592	if (this && this->start == root->start) {
 593		tmp.start = (this == old) ? old->start : this->end + 1;
 594		this = this->sibling;
 595	}
 596	for(;;) {
 597		if (this)
 598			tmp.end = (this == old) ?  this->end : this->start - 1;
 599		else
 600			tmp.end = root->end;
 601
 602		if (tmp.end < tmp.start)
 603			goto next;
 604
 605		resource_clip(&tmp, constraint->min, constraint->max);
 606		arch_remove_reservations(&tmp);
 607
 608		/* Check for overflow after ALIGN() */
 609		avail.start = ALIGN(tmp.start, constraint->align);
 610		avail.end = tmp.end;
 611		avail.flags = new->flags & ~IORESOURCE_UNSET;
 612		if (avail.start >= tmp.start) {
 613			alloc.flags = avail.flags;
 614			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
 615					size, constraint->align);
 616			alloc.end = alloc.start + size - 1;
 617			if (alloc.start <= alloc.end &&
 618			    resource_contains(&avail, &alloc)) {
 619				new->start = alloc.start;
 620				new->end = alloc.end;
 621				return 0;
 622			}
 623		}
 624
 625next:		if (!this || this->end == root->end)
 626			break;
 627
 628		if (this != old)
 629			tmp.start = this->end + 1;
 630		this = this->sibling;
 631	}
 632	return -EBUSY;
 633}
 634
 635/*
 636 * Find empty slot in the resource tree given range and alignment.
 637 */
 638static int find_resource(struct resource *root, struct resource *new,
 639			resource_size_t size,
 640			struct resource_constraint  *constraint)
 641{
 642	return  __find_resource(root, NULL, new, size, constraint);
 643}
 644
 645/**
 646 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 647 *	The resource will be relocated if the new size cannot be reallocated in the
 648 *	current location.
 649 *
 650 * @root: root resource descriptor
 651 * @old:  resource descriptor desired by caller
 652 * @newsize: new size of the resource descriptor
 653 * @constraint: the size and alignment constraints to be met.
 654 */
 655static int reallocate_resource(struct resource *root, struct resource *old,
 656			       resource_size_t newsize,
 657			       struct resource_constraint *constraint)
 658{
 659	int err=0;
 660	struct resource new = *old;
 661	struct resource *conflict;
 662
 663	write_lock(&resource_lock);
 664
 665	if ((err = __find_resource(root, old, &new, newsize, constraint)))
 666		goto out;
 667
 668	if (resource_contains(&new, old)) {
 669		old->start = new.start;
 670		old->end = new.end;
 671		goto out;
 672	}
 673
 674	if (old->child) {
 675		err = -EBUSY;
 676		goto out;
 677	}
 678
 679	if (resource_contains(old, &new)) {
 680		old->start = new.start;
 681		old->end = new.end;
 682	} else {
 683		__release_resource(old, true);
 684		*old = new;
 685		conflict = __request_resource(root, old);
 686		BUG_ON(conflict);
 687	}
 688out:
 689	write_unlock(&resource_lock);
 690	return err;
 691}
 692
 693
 694/**
 695 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 696 * 	The resource will be reallocated with a new size if it was already allocated
 697 * @root: root resource descriptor
 698 * @new: resource descriptor desired by caller
 699 * @size: requested resource region size
 700 * @min: minimum boundary to allocate
 701 * @max: maximum boundary to allocate
 702 * @align: alignment requested, in bytes
 703 * @alignf: alignment function, optional, called if not NULL
 704 * @alignf_data: arbitrary data to pass to the @alignf function
 705 */
 706int allocate_resource(struct resource *root, struct resource *new,
 707		      resource_size_t size, resource_size_t min,
 708		      resource_size_t max, resource_size_t align,
 709		      resource_size_t (*alignf)(void *,
 710						const struct resource *,
 711						resource_size_t,
 712						resource_size_t),
 713		      void *alignf_data)
 714{
 715	int err;
 716	struct resource_constraint constraint;
 717
 718	if (!alignf)
 719		alignf = simple_align_resource;
 720
 721	constraint.min = min;
 722	constraint.max = max;
 723	constraint.align = align;
 724	constraint.alignf = alignf;
 725	constraint.alignf_data = alignf_data;
 726
 727	if ( new->parent ) {
 728		/* resource is already allocated, try reallocating with
 729		   the new constraints */
 730		return reallocate_resource(root, new, size, &constraint);
 731	}
 732
 733	write_lock(&resource_lock);
 734	err = find_resource(root, new, size, &constraint);
 735	if (err >= 0 && __request_resource(root, new))
 736		err = -EBUSY;
 737	write_unlock(&resource_lock);
 738	return err;
 739}
 740
 741EXPORT_SYMBOL(allocate_resource);
 742
 743/**
 744 * lookup_resource - find an existing resource by a resource start address
 745 * @root: root resource descriptor
 746 * @start: resource start address
 747 *
 748 * Returns a pointer to the resource if found, NULL otherwise
 749 */
 750struct resource *lookup_resource(struct resource *root, resource_size_t start)
 751{
 752	struct resource *res;
 753
 754	read_lock(&resource_lock);
 755	for (res = root->child; res; res = res->sibling) {
 756		if (res->start == start)
 757			break;
 758	}
 759	read_unlock(&resource_lock);
 760
 761	return res;
 762}
 763
 764/*
 765 * Insert a resource into the resource tree. If successful, return NULL,
 766 * otherwise return the conflicting resource (compare to __request_resource())
 767 */
 768static struct resource * __insert_resource(struct resource *parent, struct resource *new)
 769{
 770	struct resource *first, *next;
 771
 772	for (;; parent = first) {
 773		first = __request_resource(parent, new);
 774		if (!first)
 775			return first;
 776
 777		if (first == parent)
 778			return first;
 779		if (WARN_ON(first == new))	/* duplicated insertion */
 780			return first;
 781
 782		if ((first->start > new->start) || (first->end < new->end))
 783			break;
 784		if ((first->start == new->start) && (first->end == new->end))
 785			break;
 786	}
 787
 788	for (next = first; ; next = next->sibling) {
 789		/* Partial overlap? Bad, and unfixable */
 790		if (next->start < new->start || next->end > new->end)
 791			return next;
 792		if (!next->sibling)
 793			break;
 794		if (next->sibling->start > new->end)
 795			break;
 796	}
 797
 798	new->parent = parent;
 799	new->sibling = next->sibling;
 800	new->child = first;
 801
 802	next->sibling = NULL;
 803	for (next = first; next; next = next->sibling)
 804		next->parent = new;
 805
 806	if (parent->child == first) {
 807		parent->child = new;
 808	} else {
 809		next = parent->child;
 810		while (next->sibling != first)
 811			next = next->sibling;
 812		next->sibling = new;
 813	}
 814	return NULL;
 815}
 816
 817/**
 818 * insert_resource_conflict - Inserts resource in the resource tree
 819 * @parent: parent of the new resource
 820 * @new: new resource to insert
 821 *
 822 * Returns 0 on success, conflict resource if the resource can't be inserted.
 823 *
 824 * This function is equivalent to request_resource_conflict when no conflict
 825 * happens. If a conflict happens, and the conflicting resources
 826 * entirely fit within the range of the new resource, then the new
 827 * resource is inserted and the conflicting resources become children of
 828 * the new resource.
 829 *
 830 * This function is intended for producers of resources, such as FW modules
 831 * and bus drivers.
 832 */
 833struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
 834{
 835	struct resource *conflict;
 836
 837	write_lock(&resource_lock);
 838	conflict = __insert_resource(parent, new);
 839	write_unlock(&resource_lock);
 840	return conflict;
 841}
 842
 843/**
 844 * insert_resource - Inserts a resource in the resource tree
 845 * @parent: parent of the new resource
 846 * @new: new resource to insert
 847 *
 848 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 849 *
 850 * This function is intended for producers of resources, such as FW modules
 851 * and bus drivers.
 852 */
 853int insert_resource(struct resource *parent, struct resource *new)
 854{
 855	struct resource *conflict;
 856
 857	conflict = insert_resource_conflict(parent, new);
 858	return conflict ? -EBUSY : 0;
 859}
 860EXPORT_SYMBOL_GPL(insert_resource);
 861
 862/**
 863 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 864 * @root: root resource descriptor
 865 * @new: new resource to insert
 866 *
 867 * Insert a resource into the resource tree, possibly expanding it in order
 868 * to make it encompass any conflicting resources.
 869 */
 870void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
 871{
 872	if (new->parent)
 873		return;
 874
 875	write_lock(&resource_lock);
 876	for (;;) {
 877		struct resource *conflict;
 878
 879		conflict = __insert_resource(root, new);
 880		if (!conflict)
 881			break;
 882		if (conflict == root)
 883			break;
 884
 885		/* Ok, expand resource to cover the conflict, then try again .. */
 886		if (conflict->start < new->start)
 887			new->start = conflict->start;
 888		if (conflict->end > new->end)
 889			new->end = conflict->end;
 890
 891		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
 892	}
 893	write_unlock(&resource_lock);
 894}
 895/*
 896 * Not for general consumption, only early boot memory map parsing, PCI
 897 * resource discovery, and late discovery of CXL resources are expected
 898 * to use this interface. The former are built-in and only the latter,
 899 * CXL, is a module.
 900 */
 901EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);
 902
 903/**
 904 * remove_resource - Remove a resource in the resource tree
 905 * @old: resource to remove
 906 *
 907 * Returns 0 on success, -EINVAL if the resource is not valid.
 908 *
 909 * This function removes a resource previously inserted by insert_resource()
 910 * or insert_resource_conflict(), and moves the children (if any) up to
 911 * where they were before.  insert_resource() and insert_resource_conflict()
 912 * insert a new resource, and move any conflicting resources down to the
 913 * children of the new resource.
 914 *
 915 * insert_resource(), insert_resource_conflict() and remove_resource() are
 916 * intended for producers of resources, such as FW modules and bus drivers.
 917 */
 918int remove_resource(struct resource *old)
 919{
 920	int retval;
 921
 922	write_lock(&resource_lock);
 923	retval = __release_resource(old, false);
 924	write_unlock(&resource_lock);
 925	return retval;
 926}
 927EXPORT_SYMBOL_GPL(remove_resource);
 928
 929static int __adjust_resource(struct resource *res, resource_size_t start,
 930				resource_size_t size)
 931{
 932	struct resource *tmp, *parent = res->parent;
 933	resource_size_t end = start + size - 1;
 934	int result = -EBUSY;
 935
 936	if (!parent)
 937		goto skip;
 938
 939	if ((start < parent->start) || (end > parent->end))
 940		goto out;
 941
 942	if (res->sibling && (res->sibling->start <= end))
 943		goto out;
 944
 945	tmp = parent->child;
 946	if (tmp != res) {
 947		while (tmp->sibling != res)
 948			tmp = tmp->sibling;
 949		if (start <= tmp->end)
 950			goto out;
 951	}
 952
 953skip:
 954	for (tmp = res->child; tmp; tmp = tmp->sibling)
 955		if ((tmp->start < start) || (tmp->end > end))
 956			goto out;
 957
 958	res->start = start;
 959	res->end = end;
 960	result = 0;
 961
 962 out:
 963	return result;
 964}
 965
 966/**
 967 * adjust_resource - modify a resource's start and size
 968 * @res: resource to modify
 969 * @start: new start value
 970 * @size: new size
 971 *
 972 * Given an existing resource, change its start and size to match the
 973 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 974 * Existing children of the resource are assumed to be immutable.
 975 */
 976int adjust_resource(struct resource *res, resource_size_t start,
 977		    resource_size_t size)
 978{
 979	int result;
 980
 981	write_lock(&resource_lock);
 982	result = __adjust_resource(res, start, size);
 983	write_unlock(&resource_lock);
 984	return result;
 985}
 986EXPORT_SYMBOL(adjust_resource);
 987
 988static void __init
 989__reserve_region_with_split(struct resource *root, resource_size_t start,
 990			    resource_size_t end, const char *name)
 991{
 992	struct resource *parent = root;
 993	struct resource *conflict;
 994	struct resource *res = alloc_resource(GFP_ATOMIC);
 995	struct resource *next_res = NULL;
 996	int type = resource_type(root);
 997
 998	if (!res)
 999		return;
1000
1001	res->name = name;
1002	res->start = start;
1003	res->end = end;
1004	res->flags = type | IORESOURCE_BUSY;
1005	res->desc = IORES_DESC_NONE;
1006
1007	while (1) {
1008
1009		conflict = __request_resource(parent, res);
1010		if (!conflict) {
1011			if (!next_res)
1012				break;
1013			res = next_res;
1014			next_res = NULL;
1015			continue;
1016		}
1017
1018		/* conflict covered whole area */
1019		if (conflict->start <= res->start &&
1020				conflict->end >= res->end) {
1021			free_resource(res);
1022			WARN_ON(next_res);
1023			break;
1024		}
1025
1026		/* failed, split and try again */
1027		if (conflict->start > res->start) {
1028			end = res->end;
1029			res->end = conflict->start - 1;
1030			if (conflict->end < end) {
1031				next_res = alloc_resource(GFP_ATOMIC);
1032				if (!next_res) {
1033					free_resource(res);
1034					break;
1035				}
1036				next_res->name = name;
1037				next_res->start = conflict->end + 1;
1038				next_res->end = end;
1039				next_res->flags = type | IORESOURCE_BUSY;
1040				next_res->desc = IORES_DESC_NONE;
1041			}
1042		} else {
1043			res->start = conflict->end + 1;
1044		}
1045	}
1046
1047}
1048
1049void __init
1050reserve_region_with_split(struct resource *root, resource_size_t start,
1051			  resource_size_t end, const char *name)
1052{
1053	int abort = 0;
1054
1055	write_lock(&resource_lock);
1056	if (root->start > start || root->end < end) {
1057		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1058		       (unsigned long long)start, (unsigned long long)end,
1059		       root);
1060		if (start > root->end || end < root->start)
1061			abort = 1;
1062		else {
1063			if (end > root->end)
1064				end = root->end;
1065			if (start < root->start)
1066				start = root->start;
1067			pr_err("fixing request to [0x%llx-0x%llx]\n",
1068			       (unsigned long long)start,
1069			       (unsigned long long)end);
1070		}
1071		dump_stack();
1072	}
1073	if (!abort)
1074		__reserve_region_with_split(root, start, end, name);
1075	write_unlock(&resource_lock);
1076}
1077
1078/**
1079 * resource_alignment - calculate resource's alignment
1080 * @res: resource pointer
1081 *
1082 * Returns alignment on success, 0 (invalid alignment) on failure.
1083 */
1084resource_size_t resource_alignment(struct resource *res)
1085{
1086	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1087	case IORESOURCE_SIZEALIGN:
1088		return resource_size(res);
1089	case IORESOURCE_STARTALIGN:
1090		return res->start;
1091	default:
1092		return 0;
1093	}
1094}
1095
1096/*
1097 * This is compatibility stuff for IO resources.
1098 *
1099 * Note how this, unlike the above, knows about
1100 * the IO flag meanings (busy etc).
1101 *
1102 * request_region creates a new busy region.
1103 *
1104 * release_region releases a matching busy region.
1105 */
1106
1107static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1108
1109static struct inode *iomem_inode;
1110
1111#ifdef CONFIG_IO_STRICT_DEVMEM
1112static void revoke_iomem(struct resource *res)
1113{
1114	/* pairs with smp_store_release() in iomem_init_inode() */
1115	struct inode *inode = smp_load_acquire(&iomem_inode);
1116
1117	/*
1118	 * Check that the initialization has completed. Losing the race
1119	 * is ok because it means drivers are claiming resources before
1120	 * the fs_initcall level of init and prevent iomem_get_mapping users
1121	 * from establishing mappings.
1122	 */
1123	if (!inode)
1124		return;
1125
1126	/*
1127	 * The expectation is that the driver has successfully marked
1128	 * the resource busy by this point, so devmem_is_allowed()
1129	 * should start returning false, however for performance this
1130	 * does not iterate the entire resource range.
1131	 */
1132	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
1133	    devmem_is_allowed(PHYS_PFN(res->end))) {
1134		/*
1135		 * *cringe* iomem=relaxed says "go ahead, what's the
1136		 * worst that can happen?"
1137		 */
1138		return;
1139	}
1140
1141	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
1142}
1143#else
1144static void revoke_iomem(struct resource *res) {}
1145#endif
1146
1147struct address_space *iomem_get_mapping(void)
1148{
1149	/*
1150	 * This function is only called from file open paths, hence guaranteed
1151	 * that fs_initcalls have completed and no need to check for NULL. But
1152	 * since revoke_iomem can be called before the initcall we still need
1153	 * the barrier to appease checkers.
1154	 */
1155	return smp_load_acquire(&iomem_inode)->i_mapping;
1156}
1157
1158static int __request_region_locked(struct resource *res, struct resource *parent,
1159				   resource_size_t start, resource_size_t n,
1160				   const char *name, int flags)
1161{
1162	DECLARE_WAITQUEUE(wait, current);
1163
1164	res->name = name;
1165	res->start = start;
1166	res->end = start + n - 1;
1167
1168	for (;;) {
1169		struct resource *conflict;
1170
1171		res->flags = resource_type(parent) | resource_ext_type(parent);
1172		res->flags |= IORESOURCE_BUSY | flags;
1173		res->desc = parent->desc;
1174
1175		conflict = __request_resource(parent, res);
1176		if (!conflict)
1177			break;
1178		/*
1179		 * mm/hmm.c reserves physical addresses which then
1180		 * become unavailable to other users.  Conflicts are
1181		 * not expected.  Warn to aid debugging if encountered.
1182		 */
1183		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1184			pr_warn("Unaddressable device %s %pR conflicts with %pR",
1185				conflict->name, conflict, res);
1186		}
1187		if (conflict != parent) {
1188			if (!(conflict->flags & IORESOURCE_BUSY)) {
1189				parent = conflict;
1190				continue;
1191			}
1192		}
1193		if (conflict->flags & flags & IORESOURCE_MUXED) {
1194			add_wait_queue(&muxed_resource_wait, &wait);
1195			write_unlock(&resource_lock);
1196			set_current_state(TASK_UNINTERRUPTIBLE);
1197			schedule();
1198			remove_wait_queue(&muxed_resource_wait, &wait);
1199			write_lock(&resource_lock);
1200			continue;
1201		}
1202		/* Uhhuh, that didn't work out.. */
1203		return -EBUSY;
1204	}
1205
1206	return 0;
1207}
1208
1209/**
1210 * __request_region - create a new busy resource region
1211 * @parent: parent resource descriptor
1212 * @start: resource start address
1213 * @n: resource region size
1214 * @name: reserving caller's ID string
1215 * @flags: IO resource flags
1216 */
1217struct resource *__request_region(struct resource *parent,
1218				  resource_size_t start, resource_size_t n,
1219				  const char *name, int flags)
1220{
1221	struct resource *res = alloc_resource(GFP_KERNEL);
1222	int ret;
1223
1224	if (!res)
1225		return NULL;
1226
1227	write_lock(&resource_lock);
1228	ret = __request_region_locked(res, parent, start, n, name, flags);
1229	write_unlock(&resource_lock);
1230
1231	if (ret) {
1232		free_resource(res);
1233		return NULL;
1234	}
1235
1236	if (parent == &iomem_resource)
1237		revoke_iomem(res);
1238
1239	return res;
1240}
1241EXPORT_SYMBOL(__request_region);
1242
1243/**
1244 * __release_region - release a previously reserved resource region
1245 * @parent: parent resource descriptor
1246 * @start: resource start address
1247 * @n: resource region size
1248 *
1249 * The described resource region must match a currently busy region.
1250 */
1251void __release_region(struct resource *parent, resource_size_t start,
1252		      resource_size_t n)
1253{
1254	struct resource **p;
1255	resource_size_t end;
1256
1257	p = &parent->child;
1258	end = start + n - 1;
1259
1260	write_lock(&resource_lock);
1261
1262	for (;;) {
1263		struct resource *res = *p;
1264
1265		if (!res)
1266			break;
1267		if (res->start <= start && res->end >= end) {
1268			if (!(res->flags & IORESOURCE_BUSY)) {
1269				p = &res->child;
1270				continue;
1271			}
1272			if (res->start != start || res->end != end)
1273				break;
1274			*p = res->sibling;
1275			write_unlock(&resource_lock);
1276			if (res->flags & IORESOURCE_MUXED)
1277				wake_up(&muxed_resource_wait);
1278			free_resource(res);
1279			return;
1280		}
1281		p = &res->sibling;
1282	}
1283
1284	write_unlock(&resource_lock);
1285
1286	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
1287}
1288EXPORT_SYMBOL(__release_region);
1289
1290#ifdef CONFIG_MEMORY_HOTREMOVE
1291/**
1292 * release_mem_region_adjustable - release a previously reserved memory region
1293 * @start: resource start address
1294 * @size: resource region size
1295 *
1296 * This interface is intended for memory hot-delete.  The requested region
1297 * is released from a currently busy memory resource.  The requested region
1298 * must either match exactly or fit into a single busy resource entry.  In
1299 * the latter case, the remaining resource is adjusted accordingly.
1300 * Existing children of the busy memory resource must be immutable in the
1301 * request.
1302 *
1303 * Note:
1304 * - Additional release conditions, such as overlapping region, can be
1305 *   supported after they are confirmed as valid cases.
1306 * - When a busy memory resource gets split into two entries, the code
1307 *   assumes that all children remain in the lower address entry for
1308 *   simplicity.  Enhance this logic when necessary.
1309 */
1310void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
1311{
1312	struct resource *parent = &iomem_resource;
1313	struct resource *new_res = NULL;
1314	bool alloc_nofail = false;
1315	struct resource **p;
1316	struct resource *res;
1317	resource_size_t end;
1318
1319	end = start + size - 1;
1320	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
1321		return;
1322
1323	/*
1324	 * We free up quite a lot of memory on memory hotunplug (esp., memap),
1325	 * just before releasing the region. This is highly unlikely to
1326	 * fail - let's play save and make it never fail as the caller cannot
1327	 * perform any error handling (e.g., trying to re-add memory will fail
1328	 * similarly).
1329	 */
1330retry:
1331	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
1332
1333	p = &parent->child;
1334	write_lock(&resource_lock);
1335
1336	while ((res = *p)) {
1337		if (res->start >= end)
1338			break;
1339
1340		/* look for the next resource if it does not fit into */
1341		if (res->start > start || res->end < end) {
1342			p = &res->sibling;
1343			continue;
1344		}
1345
1346		/*
1347		 * All memory regions added from memory-hotplug path have the
1348		 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
1349		 * this flag, we know that we are dealing with a resource coming
1350		 * from HMM/devm. HMM/devm use another mechanism to add/release
1351		 * a resource. This goes via devm_request_mem_region and
1352		 * devm_release_mem_region.
1353		 * HMM/devm take care to release their resources when they want,
1354		 * so if we are dealing with them, let us just back off here.
1355		 */
1356		if (!(res->flags & IORESOURCE_SYSRAM)) {
1357			break;
1358		}
1359
1360		if (!(res->flags & IORESOURCE_MEM))
1361			break;
1362
1363		if (!(res->flags & IORESOURCE_BUSY)) {
1364			p = &res->child;
1365			continue;
1366		}
1367
1368		/* found the target resource; let's adjust accordingly */
1369		if (res->start == start && res->end == end) {
1370			/* free the whole entry */
1371			*p = res->sibling;
1372			free_resource(res);
1373		} else if (res->start == start && res->end != end) {
1374			/* adjust the start */
1375			WARN_ON_ONCE(__adjust_resource(res, end + 1,
1376						       res->end - end));
1377		} else if (res->start != start && res->end == end) {
1378			/* adjust the end */
1379			WARN_ON_ONCE(__adjust_resource(res, res->start,
1380						       start - res->start));
1381		} else {
1382			/* split into two entries - we need a new resource */
1383			if (!new_res) {
1384				new_res = alloc_resource(GFP_ATOMIC);
1385				if (!new_res) {
1386					alloc_nofail = true;
1387					write_unlock(&resource_lock);
1388					goto retry;
1389				}
1390			}
1391			new_res->name = res->name;
1392			new_res->start = end + 1;
1393			new_res->end = res->end;
1394			new_res->flags = res->flags;
1395			new_res->desc = res->desc;
1396			new_res->parent = res->parent;
1397			new_res->sibling = res->sibling;
1398			new_res->child = NULL;
1399
1400			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
1401							   start - res->start)))
1402				break;
1403			res->sibling = new_res;
1404			new_res = NULL;
1405		}
1406
1407		break;
1408	}
1409
1410	write_unlock(&resource_lock);
1411	free_resource(new_res);
1412}
1413#endif	/* CONFIG_MEMORY_HOTREMOVE */
1414
1415#ifdef CONFIG_MEMORY_HOTPLUG
1416static bool system_ram_resources_mergeable(struct resource *r1,
1417					   struct resource *r2)
1418{
1419	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
1420	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
1421	       r1->name == r2->name && r1->desc == r2->desc &&
1422	       !r1->child && !r2->child;
1423}
1424
1425/**
1426 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
1427 *	merge it with adjacent, mergeable resources
1428 * @res: resource descriptor
1429 *
1430 * This interface is intended for memory hotplug, whereby lots of contiguous
1431 * system ram resources are added (e.g., via add_memory*()) by a driver, and
1432 * the actual resource boundaries are not of interest (e.g., it might be
1433 * relevant for DIMMs). Only resources that are marked mergeable, that have the
1434 * same parent, and that don't have any children are considered. All mergeable
1435 * resources must be immutable during the request.
1436 *
1437 * Note:
1438 * - The caller has to make sure that no pointers to resources that are
1439 *   marked mergeable are used anymore after this call - the resource might
1440 *   be freed and the pointer might be stale!
1441 * - release_mem_region_adjustable() will split on demand on memory hotunplug
1442 */
1443void merge_system_ram_resource(struct resource *res)
1444{
1445	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
1446	struct resource *cur;
1447
1448	if (WARN_ON_ONCE((res->flags & flags) != flags))
1449		return;
1450
1451	write_lock(&resource_lock);
1452	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
1453
1454	/* Try to merge with next item in the list. */
1455	cur = res->sibling;
1456	if (cur && system_ram_resources_mergeable(res, cur)) {
1457		res->end = cur->end;
1458		res->sibling = cur->sibling;
1459		free_resource(cur);
1460	}
1461
1462	/* Try to merge with previous item in the list. */
1463	cur = res->parent->child;
1464	while (cur && cur->sibling != res)
1465		cur = cur->sibling;
1466	if (cur && system_ram_resources_mergeable(cur, res)) {
1467		cur->end = res->end;
1468		cur->sibling = res->sibling;
1469		free_resource(res);
1470	}
1471	write_unlock(&resource_lock);
1472}
1473#endif	/* CONFIG_MEMORY_HOTPLUG */
1474
1475/*
1476 * Managed region resource
1477 */
1478static void devm_resource_release(struct device *dev, void *ptr)
1479{
1480	struct resource **r = ptr;
1481
1482	release_resource(*r);
1483}
1484
1485/**
1486 * devm_request_resource() - request and reserve an I/O or memory resource
1487 * @dev: device for which to request the resource
1488 * @root: root of the resource tree from which to request the resource
1489 * @new: descriptor of the resource to request
1490 *
1491 * This is a device-managed version of request_resource(). There is usually
1492 * no need to release resources requested by this function explicitly since
1493 * that will be taken care of when the device is unbound from its driver.
1494 * If for some reason the resource needs to be released explicitly, because
1495 * of ordering issues for example, drivers must call devm_release_resource()
1496 * rather than the regular release_resource().
1497 *
1498 * When a conflict is detected between any existing resources and the newly
1499 * requested resource, an error message will be printed.
1500 *
1501 * Returns 0 on success or a negative error code on failure.
1502 */
1503int devm_request_resource(struct device *dev, struct resource *root,
1504			  struct resource *new)
1505{
1506	struct resource *conflict, **ptr;
1507
1508	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1509	if (!ptr)
1510		return -ENOMEM;
1511
1512	*ptr = new;
1513
1514	conflict = request_resource_conflict(root, new);
1515	if (conflict) {
1516		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1517			new, conflict->name, conflict);
1518		devres_free(ptr);
1519		return -EBUSY;
1520	}
1521
1522	devres_add(dev, ptr);
1523	return 0;
1524}
1525EXPORT_SYMBOL(devm_request_resource);
1526
1527static int devm_resource_match(struct device *dev, void *res, void *data)
1528{
1529	struct resource **ptr = res;
1530
1531	return *ptr == data;
1532}
1533
1534/**
1535 * devm_release_resource() - release a previously requested resource
1536 * @dev: device for which to release the resource
1537 * @new: descriptor of the resource to release
1538 *
1539 * Releases a resource previously requested using devm_request_resource().
1540 */
1541void devm_release_resource(struct device *dev, struct resource *new)
1542{
1543	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1544			       new));
1545}
1546EXPORT_SYMBOL(devm_release_resource);
1547
1548struct region_devres {
1549	struct resource *parent;
1550	resource_size_t start;
1551	resource_size_t n;
1552};
1553
1554static void devm_region_release(struct device *dev, void *res)
1555{
1556	struct region_devres *this = res;
1557
1558	__release_region(this->parent, this->start, this->n);
1559}
1560
1561static int devm_region_match(struct device *dev, void *res, void *match_data)
1562{
1563	struct region_devres *this = res, *match = match_data;
1564
1565	return this->parent == match->parent &&
1566		this->start == match->start && this->n == match->n;
1567}
1568
1569struct resource *
1570__devm_request_region(struct device *dev, struct resource *parent,
1571		      resource_size_t start, resource_size_t n, const char *name)
1572{
1573	struct region_devres *dr = NULL;
1574	struct resource *res;
1575
1576	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1577			  GFP_KERNEL);
1578	if (!dr)
1579		return NULL;
1580
1581	dr->parent = parent;
1582	dr->start = start;
1583	dr->n = n;
1584
1585	res = __request_region(parent, start, n, name, 0);
1586	if (res)
1587		devres_add(dev, dr);
1588	else
1589		devres_free(dr);
1590
1591	return res;
1592}
1593EXPORT_SYMBOL(__devm_request_region);
1594
1595void __devm_release_region(struct device *dev, struct resource *parent,
1596			   resource_size_t start, resource_size_t n)
1597{
1598	struct region_devres match_data = { parent, start, n };
1599
1600	__release_region(parent, start, n);
1601	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1602			       &match_data));
1603}
1604EXPORT_SYMBOL(__devm_release_region);
1605
1606/*
1607 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
1608 */
1609#define MAXRESERVE 4
1610static int __init reserve_setup(char *str)
1611{
1612	static int reserved;
1613	static struct resource reserve[MAXRESERVE];
1614
1615	for (;;) {
1616		unsigned int io_start, io_num;
1617		int x = reserved;
1618		struct resource *parent;
1619
1620		if (get_option(&str, &io_start) != 2)
1621			break;
1622		if (get_option(&str, &io_num) == 0)
1623			break;
1624		if (x < MAXRESERVE) {
1625			struct resource *res = reserve + x;
1626
1627			/*
1628			 * If the region starts below 0x10000, we assume it's
1629			 * I/O port space; otherwise assume it's memory.
1630			 */
1631			if (io_start < 0x10000) {
1632				res->flags = IORESOURCE_IO;
1633				parent = &ioport_resource;
1634			} else {
1635				res->flags = IORESOURCE_MEM;
1636				parent = &iomem_resource;
1637			}
1638			res->name = "reserved";
1639			res->start = io_start;
1640			res->end = io_start + io_num - 1;
1641			res->flags |= IORESOURCE_BUSY;
1642			res->desc = IORES_DESC_NONE;
1643			res->child = NULL;
1644			if (request_resource(parent, res) == 0)
1645				reserved = x+1;
1646		}
1647	}
1648	return 1;
1649}
1650__setup("reserve=", reserve_setup);
1651
1652/*
1653 * Check if the requested addr and size spans more than any slot in the
1654 * iomem resource tree.
1655 */
1656int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1657{
1658	struct resource *p = &iomem_resource;
1659	resource_size_t end = addr + size - 1;
 
1660	int err = 0;
1661	loff_t l;
1662
1663	read_lock(&resource_lock);
1664	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1665		/*
1666		 * We can probably skip the resources without
1667		 * IORESOURCE_IO attribute?
1668		 */
1669		if (p->start > end)
1670			continue;
1671		if (p->end < addr)
1672			continue;
1673		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1674		    PFN_DOWN(p->end) >= PFN_DOWN(end))
1675			continue;
1676		/*
1677		 * if a resource is "BUSY", it's not a hardware resource
1678		 * but a driver mapping of such a resource; we don't want
1679		 * to warn for those; some drivers legitimately map only
1680		 * partial hardware resources. (example: vesafb)
1681		 */
1682		if (p->flags & IORESOURCE_BUSY)
1683			continue;
1684
1685		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
1686			&addr, &end, p->name, p);
1687		err = -1;
1688		break;
1689	}
1690	read_unlock(&resource_lock);
1691
1692	return err;
1693}
1694
1695#ifdef CONFIG_STRICT_DEVMEM
1696static int strict_iomem_checks = 1;
1697#else
1698static int strict_iomem_checks;
1699#endif
1700
1701/*
1702 * Check if an address is exclusive to the kernel and must not be mapped to
1703 * user space, for example, via /dev/mem.
1704 *
1705 * Returns true if exclusive to the kernel, otherwise returns false.
1706 */
1707bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
1708{
1709	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
1710						  IORESOURCE_EXCLUSIVE;
1711	bool skip_children = false, err = false;
1712	struct resource *p;
1713
1714	read_lock(&resource_lock);
1715	for_each_resource(root, p, skip_children) {
1716		if (p->start >= addr + size)
1717			break;
1718		if (p->end < addr) {
1719			skip_children = true;
1720			continue;
1721		}
1722		skip_children = false;
1723
1724		/*
1725		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
1726		 * IORESOURCE_EXCLUSIVE is set, even if they
1727		 * are not busy and even if "iomem=relaxed" is set. The
1728		 * responsible driver dynamically adds/removes system RAM within
1729		 * such an area and uncontrolled access is dangerous.
1730		 */
1731		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
1732			err = true;
1733			break;
1734		}
1735
1736		/*
1737		 * Otherwise, a busy resource is exclusive only when strict
1738		 * iomem checks are enabled and either IORESOURCE_EXCLUSIVE
1739		 * is set or CONFIG_IO_STRICT_DEVMEM is enabled.
1740		 */
1741		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
1742			continue;
1743		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1744				|| p->flags & IORESOURCE_EXCLUSIVE) {
1745			err = true;
1746			break;
1747		}
1748	}
1749	read_unlock(&resource_lock);
1750
1751	return err;
1752}
1753
1754bool iomem_is_exclusive(u64 addr)
1755{
1756	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
1757				     PAGE_SIZE);
1758}
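
/*
 * Illustrative caller, modelled on what the /dev/mem code does before
 * handing physical memory to user space (paddr is hypothetical here):
 *
 *	if (iomem_is_exclusive(paddr))
 *		return -EPERM;
 */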
1759
1760struct resource_entry *resource_list_create_entry(struct resource *res,
1761						  size_t extra_size)
1762{
1763	struct resource_entry *entry;
1764
1765	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1766	if (entry) {
1767		INIT_LIST_HEAD(&entry->node);
1768		entry->res = res ? res : &entry->__res;
1769	}
1770
1771	return entry;
1772}
1773EXPORT_SYMBOL(resource_list_create_entry);
1774
1775void resource_list_free(struct list_head *head)
1776{
1777	struct resource_entry *entry, *tmp;
1778
1779	list_for_each_entry_safe(entry, tmp, head, node)
1780		resource_list_destroy_entry(entry);
1781}
1782EXPORT_SYMBOL(resource_list_free);
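
/*
 * Minimal sketch of resource_entry list handling (the list name and the
 * range are invented). Passing res == NULL makes the entry use its
 * embedded struct resource:
 *
 *	LIST_HEAD(my_list);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0x1000;
 *	entry->res->end = 0x1fff;
 *	resource_list_add_tail(entry, &my_list);
 *	...
 *	resource_list_free(&my_list);
 */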
1783
1784#ifdef CONFIG_GET_FREE_REGION
1785#define GFR_DESCENDING		(1UL << 0)
1786#define GFR_REQUEST_REGION	(1UL << 1)
1787#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
1788
1789static resource_size_t gfr_start(struct resource *base, resource_size_t size,
1790				 resource_size_t align, unsigned long flags)
1791{
1792	if (flags & GFR_DESCENDING) {
1793		resource_size_t end;
1794
1795		end = min_t(resource_size_t, base->end,
1796			    (1ULL << MAX_PHYSMEM_BITS) - 1);
1797		return end - size + 1;
1798	}
1799
1800	return ALIGN(base->start, align);
1801}
1802
1803static bool gfr_continue(struct resource *base, resource_size_t addr,
1804			 resource_size_t size, unsigned long flags)
1805{
1806	if (flags & GFR_DESCENDING)
1807		return addr > size && addr >= base->start;
1808	/*
1809	 * In the ascending case, be careful that the last increment
1810	 * by @size did not wrap around 0.
1811	 */
1812	return addr > addr - size &&
1813	       addr <= min_t(resource_size_t, base->end,
1814			     (1ULL << MAX_PHYSMEM_BITS) - 1);
1815}
1816
1817static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
1818				unsigned long flags)
1819{
1820	if (flags & GFR_DESCENDING)
1821		return addr - size;
1822	return addr + size;
1823}
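
/*
 * Worked example of the gfr_* walk (numbers invented): with @size and
 * @align both 1G and an ascending walk from base->start = 4G, the
 * candidate windows are [4G, 5G), [5G, 6G), ... until base->end or the
 * MAX_PHYSMEM_BITS limit is exceeded; a descending walk starts at the
 * highest window that still fits below both limits and steps down by
 * @size. The wrap checks in gfr_continue() stop either direction from
 * stepping past 0.
 */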
1824
1825static void remove_free_mem_region(void *_res)
1826{
1827	struct resource *res = _res;
1828
1829	if (res->parent)
1830		remove_resource(res);
1831	free_resource(res);
1832}
1833
1834static struct resource *
1835get_free_mem_region(struct device *dev, struct resource *base,
1836		    resource_size_t size, const unsigned long align,
1837		    const char *name, const unsigned long desc,
1838		    const unsigned long flags)
1839{
1840	resource_size_t addr;
1841	struct resource *res;
1842	struct region_devres *dr = NULL;
1843
1844	size = ALIGN(size, align);
1845
1846	res = alloc_resource(GFP_KERNEL);
1847	if (!res)
1848		return ERR_PTR(-ENOMEM);
1849
1850	if (dev && (flags & GFR_REQUEST_REGION)) {
1851		dr = devres_alloc(devm_region_release,
1852				sizeof(struct region_devres), GFP_KERNEL);
1853		if (!dr) {
1854			free_resource(res);
1855			return ERR_PTR(-ENOMEM);
1856		}
1857	} else if (dev) {
1858		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
1859			return ERR_PTR(-ENOMEM);
1860	}
1861
1862	write_lock(&resource_lock);
1863	for (addr = gfr_start(base, size, align, flags);
1864	     gfr_continue(base, addr, size, flags);
1865	     addr = gfr_next(addr, size, flags)) {
1866		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
1867		    REGION_DISJOINT)
1868			continue;
1869
1870		if (flags & GFR_REQUEST_REGION) {
1871			if (__request_region_locked(res, &iomem_resource, addr,
1872						    size, name, 0))
1873				break;
1874
1875			if (dev) {
1876				dr->parent = &iomem_resource;
1877				dr->start = addr;
1878				dr->n = size;
1879				devres_add(dev, dr);
1880			}
1881
1882			res->desc = desc;
1883			write_unlock(&resource_lock);
1884
1886			/*
1887			 * A driver is claiming this region so revoke any
1888			 * mappings.
1889			 */
1890			revoke_iomem(res);
1891		} else {
1892			res->start = addr;
1893			res->end = addr + size - 1;
1894			res->name = name;
1895			res->desc = desc;
1896			res->flags = IORESOURCE_MEM;
1897
1898			/*
1899			 * Only succeed if the resource hosts an exclusive
1900			 * range after the insert
1901			 */
1902			if (__insert_resource(base, res) || res->child)
1903				break;
1904
1905			write_unlock(&resource_lock);
1906		}
1907
1908		return res;
1909	}
1910	write_unlock(&resource_lock);
1911
1912	if (flags & GFR_REQUEST_REGION) {
1913		free_resource(res);
1914		devres_free(dr);
1915	} else if (dev)
1916		devm_release_action(dev, remove_free_mem_region, res);
1917
1918	return ERR_PTR(-ERANGE);
1919}
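
/*
 * Summary of the two modes above: with GFR_REQUEST_REGION the winning
 * window is claimed busy under &iomem_resource and any existing user
 * mappings of it are revoked; without it, the new resource is merely
 * inserted under @base, and the attempt fails if the insert did not
 * yield an exclusive (childless) range.
 */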
1920
1921/**
1922 * devm_request_free_mem_region - find free region for device private memory
1923 *
1924 * @dev: device struct to bind the resource to
1925 * @base: resource tree to look in
1926 * @size: size in bytes of the device memory to add
1927 *
1928 * This function tries to find an empty range of physical address space big
1929 * enough to contain the new resource, so that it can later be hotplugged as
1930 * ZONE_DEVICE memory, which in turn allocates struct pages.
1931 */
1932struct resource *devm_request_free_mem_region(struct device *dev,
1933		struct resource *base, unsigned long size)
1934{
1935	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
1936
1937	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
1938				   dev_name(dev),
1939				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
1940}
1941EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
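
/*
 * Illustrative caller, loosely following how ZONE_DEVICE users feed the
 * result to memremap_pages(); "pgmap" and the trimmed error handling
 * are assumptions for the example, not code from this file:
 *
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_1G);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 */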
1942
1943struct resource *request_free_mem_region(struct resource *base,
1944		unsigned long size, const char *name)
1945{
1946	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
1947
1948	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
1949				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
1950}
1951EXPORT_SYMBOL_GPL(request_free_mem_region);
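
/*
 * Note: this is the unmanaged variant of the same search; with no
 * struct device to own the region, the caller must release it itself
 * (lib/test_hmm.c is one in-tree example of such a caller).
 */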
1952
1953/**
1954 * alloc_free_mem_region - find a free region relative to @base
1955 * @base: resource that will parent the new resource
1956 * @size: size in bytes of memory to allocate from @base
1957 * @align: alignment requirements for the allocation
1958 * @name: resource name
1959 *
1960 * Buses like CXL, which can dynamically instantiate new memory regions,
1961 * need a method to allocate physical address space for those regions.
1962 * Allocate and insert a new resource to cover a free range in the span
1963 * of @base that is not already claimed by a descendant of @base.
1964 */
1965struct resource *alloc_free_mem_region(struct resource *base,
1966				       unsigned long size, unsigned long align,
1967				       const char *name)
1968{
1969	/* Default of ascending direction and insert resource */
1970	unsigned long flags = 0;
1971
1972	return get_free_mem_region(NULL, base, size, align, name,
1973				   IORES_DESC_NONE, flags);
1974}
1975EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
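
/*
 * Sketch of a CXL-style caller (the "cxl_win" resource and the values
 * are hypothetical): carve a 256M, 256M-aligned window out of a host
 * bridge window resource.
 *
 *	res = alloc_free_mem_region(cxl_win, SZ_256M, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * Unlike the request_*() variants above, the new resource is inserted
 * under @base without being marked busy, so a child can claim it later.
 */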
1976#endif /* CONFIG_GET_FREE_REGION */
1977
1978static int __init strict_iomem(char *str)
1979{
1980	if (strstr(str, "relaxed"))
1981		strict_iomem_checks = 0;
1982	if (strstr(str, "strict"))
1983		strict_iomem_checks = 1;
1984	return 1;
1985}
1986
1987static int iomem_fs_init_fs_context(struct fs_context *fc)
1988{
1989	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
1990}
1991
1992static struct file_system_type iomem_fs_type = {
1993	.name		= "iomem",
1994	.owner		= THIS_MODULE,
1995	.init_fs_context = iomem_fs_init_fs_context,
1996	.kill_sb	= kill_anon_super,
1997};
1998
1999static int __init iomem_init_inode(void)
2000{
2001	static struct vfsmount *iomem_vfs_mount;
2002	static int iomem_fs_cnt;
2003	struct inode *inode;
2004	int rc;
2005
2006	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
2007	if (rc < 0) {
2008		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
2009		return rc;
2010	}
2011
2012	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
2013	if (IS_ERR(inode)) {
2014		rc = PTR_ERR(inode);
2015		pr_err("Cannot allocate inode for iomem: %d\n", rc);
2016		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
2017		return rc;
2018	}
2019
2020	/*
2021	 * Publish iomem revocation inode initialized.
2022	 * Pairs with smp_load_acquire() in revoke_iomem().
2023	 */
2024	smp_store_release(&iomem_inode, inode);
2025
2026	return 0;
2027}
2028
2029fs_initcall(iomem_init_inode);
2030
2031__setup("iomem=", strict_iomem);
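
/*
 * Usage note: booting with "iomem=relaxed" clears strict_iomem_checks,
 * relaxing the /dev/mem-style checks above, while "iomem=strict"
 * restores the default that CONFIG_STRICT_DEVMEM selects.
 */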