v3.1
 
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000
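
/*
 * Illustrative sketch, not part of the original file: the driver-side
 * usage pattern described in the header comment at the top of this file.
 * The bus address, size and register offsets are hypothetical.
 */
static int __maybe_unused example_ioremap_usage(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(0x10000000, SZ_4K);	/* hypothetical device */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x0);	/* only readl/writel/memcpy_toio & co. */
	status = readl(regs + 0x4);

	iounmap(regs);
	return status ? 0 : -ENODEV;
}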

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
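
/*
 * Worked example, illustrative only (not part of the original file): how
 * remap_area_supersections() above folds the upper physical address bits
 * into a supersection descriptor.  For a hypothetical pfn of 0x230000
 * (physical address 0x2_3000_0000), (pfn >> (32 - PAGE_SHIFT)) & 0xf is
 * 0x2, i.e. PA[35:32], and shifting it left by 20 places it in descriptor
 * bits [23:20], where the ARM supersection format carries the extended
 * address bits.
 */
static unsigned long __maybe_unused example_supersection_ext_bits(void)
{
	unsigned long pfn = 0x230000;	/* hypothetical 36-bit address */

	return ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;	/* 0x00200000 */
}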

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
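
/*
 * Worked example, illustrative only (not part of the original file): for a
 * call such as __arm_ioremap(0x40001004, 8, MT_DEVICE),
 * __arm_ioremap_caller() above splits the physical address into
 * pfn = 0x40001 and offset = 0x004; __arm_ioremap_pfn_caller() then rounds
 * the length up to a whole page (PAGE_ALIGN(0x004 + 8) == 0x1000), maps
 * that page, and returns the new virtual address plus 0x004, so the caller
 * never sees the alignment fix-up mentioned in the comment above.
 */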

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we must clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is in ascending address order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}
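
/*
 * Illustrative sketch, not part of the original file: static mappings
 * normally enter static_vmlist from a machine's iotable during boot and
 * are later reused by ioremap() through find_static_vm_paddr() above.
 * The addresses below are hypothetical.
 */
static struct map_desc example_io_desc[] __initdata __maybe_unused = {
	{
		.virtual	= 0xf8000000,
		.pfn		= __phys_to_pfn(0x10000000),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init __maybe_unused example_map_io(void)
{
	/* would be called from the machine descriptor's ->map_io() hook */
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}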

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
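
/*
 * Illustrative sketch, not part of the original file: choosing between the
 * variants above.  A write-combined mapping is a common choice for a
 * framebuffer-like aperture; the address and size are hypothetical.
 */
static int __maybe_unused example_map_framebuffer(void)
{
	void __iomem *fb;

	fb = ioremap_wc(0x60000000, SZ_8M);	/* hypothetical aperture */
	if (!fb)
		return -ENOMEM;

	memset_io(fb, 0, SZ_8M);	/* blank the screen */
	iounmap(fb);
	return 0;
}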

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
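
/*
 * Illustrative sketch, not part of the original file: a typical consumer
 * copies a routine into the executable mapping with fncpy() and runs it
 * from there, e.g. SRAM-resident suspend or clock-reprogramming code.
 * The SRAM address, size and function are hypothetical, and fncpy() has
 * its own alignment requirements on the destination buffer.
 */
#include <asm/fncpy.h>	/* would normally sit with the other includes */

static int __maybe_unused example_run_from_sram(void (*fn)(void), size_t size)
{
	void (*sram_fn)(void);
	void __iomem *sram;

	sram = __arm_ioremap_exec(0x40300000, SZ_16K, false);	/* hypothetical SRAM */
	if (!sram)
		return -ENOMEM;

	sram_fn = fncpy((void *)sram, fn, size);
	sram_fn();	/* now executes from the remapped SRAM */
	return 0;
}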

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
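
/*
 * Illustrative sketch, not part of the original file: a host-bridge driver
 * maps one 64K window of its I/O aperture into the fixed PCI_IO_VIRT_BASE
 * region, after which port accessors such as inb()/outb() reach the bridge
 * for ports inside that window.  The physical address and port number are
 * hypothetical.
 */
static int __maybe_unused example_map_pci_io_window(void)
{
	int ret;

	ret = pci_ioremap_io(0, 0x90000000);	/* hypothetical I/O aperture */
	if (ret)
		return ret;

	outb(0xff, 0x60);	/* port 0x60 now lands in the bridge window */
	return 0;
}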

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}