Linux v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif
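
/*
 * Note (editorial): the #ifndef guards above are the kernel's usual
 * arch-override pattern.  An architecture that needs different behaviour
 * supplies its own definition (typically in its asm/io.h) before this
 * file is compiled, along the lines of this hypothetical sketch:
 *
 *	static inline bool arch_memremap_can_ram_remap(resource_size_t offset,
 *						       size_t size,
 *						       unsigned long flags)
 *	{
 *		return my_range_is_remappable(offset, size);
 *	}
 *	#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 *
 * The trailing #define is what makes the generic fallback above drop out
 * at preprocessing time; my_range_is_remappable() is an invented
 * placeholder, not a real kernel helper.
 */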

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
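
/*
 * Editorial example (not part of the upstream file): a minimal sketch of
 * how a caller might use memremap()/memunmap() to read a table from a
 * reserved, non-System-RAM physical range.  Because the range is disjoint
 * from System RAM, asking for MEMREMAP_WB | MEMREMAP_WT means "cacheable
 * if possible, else write-through", per the fallback order documented
 * above.  The function name is invented for illustration.
 */
static int __maybe_unused example_read_table(resource_size_t base, size_t len,
					     void *buf)
{
	void *p;

	/* Prefer the default cacheable mapping; fall back to write-through */
	p = memremap(base, len, MEMREMAP_WB | MEMREMAP_WT);
	if (!p)
		return -ENOMEM;

	/* Plain loads/stores; no readl()/memcpy_fromio() and no __iomem */
	memcpy(buf, p, len);
	memunmap(p);
	return 0;
}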

void memunmap(void *addr)
{
	if (is_ioremap_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
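
/*
 * Note (editorial): memunmap() only calls iounmap() when the address
 * actually lives in the ioremap/vmalloc range.  Pointers handed out by
 * try_ram_remap() point into the kernel's direct map, fail the
 * is_ioremap_addr() test, and are correctly left alone; there is no
 * per-mapping state to tear down for them.
 */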

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
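
/*
 * Editorial example (not part of the upstream file): the devm_ variants
 * tie the mapping's lifetime to a struct device, so a probe routine can
 * skip the explicit memunmap() on its error and remove paths.  Note the
 * convention difference: devm_memremap() returns an ERR_PTR(), not NULL,
 * on failure.  The probe function and resource layout are invented for
 * illustration; it additionally assumes <linux/platform_device.h>.
 */
static int __maybe_unused example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *shmem;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	shmem = devm_memremap(&pdev->dev, res->start, resource_size(res),
			      MEMREMAP_WB);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/* shmem is unmapped automatically when the device is unbound */
	return 0;
}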