arch/x86/mm/ioremap.c (v3.15)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

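/*
 * Editor's worked example (not in the original file): the alignment
 * fix-up above, assuming PAGE_SIZE == 4096. For a hypothetical request
 * phys_addr = 0xfed00004, size = 8:
 *
 *	last_addr = 0xfed0000b
 *	offset    = 0xfed00004 & ~PAGE_MASK              = 0x4
 *	phys_addr &= PHYSICAL_PAGE_MASK                 -> 0xfed00000
 *	size      = PAGE_ALIGN(0xfed0000c) - 0xfed00000  = 0x1000
 *
 * One whole page is mapped and the caller gets vaddr + 0x4 back, which
 * is how the NOTE about non-page-aligned mappings is honoured.
 */
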
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

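/*
 * Editor's usage sketch (not part of the original file, and compiled out
 * by the hypothetical IOREMAP_USAGE_SKETCH guard): the typical driver
 * pattern for ioremap_nocache() as described above. The BAR address,
 * length and register offset are made up for illustration.
 */
#ifdef IOREMAP_USAGE_SKETCH
static void __iomem *example_regs;

static int example_map_bar(resource_size_t bar_phys)
{
	example_regs = ioremap_nocache(bar_phys, 0x1000);
	if (!example_regs)
		return -ENOMEM;

	/* Use the mmio helpers; never dereference the cookie directly */
	writel(0x1, example_regs + 0x10);
	(void)readl(example_regs + 0x10);	/* flush the posted PCI write */
	return 0;
}

static void example_unmap_bar(void)
{
	iounmap(example_regs);		/* must pair with the ioremap */
}
#endif
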
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

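/*
 * Editor's usage sketch (not part of the original file): write combining
 * is typically used for framebuffer-style apertures, where streaming
 * stores matter and reads have no side effects. Addresses and lengths
 * here are hypothetical.
 */
#ifdef IOREMAP_USAGE_SKETCH
static int example_map_fb(resource_size_t fb_phys, unsigned long fb_len)
{
	void __iomem *fb = ioremap_wc(fb_phys, fb_len);

	if (!fb)
		return -ENOMEM;
	/* silently falls back to UC- when PAT is disabled, see above */
	memset_io(fb, 0, fb_len);
	iounmap(fb);
	return 0;
}
#endif
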
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

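/*
 * Editor's usage sketch (not part of the original file): a cacheable
 * (WB) mapping suits read-mostly data such as firmware tables that live
 * outside the direct mapping. The table address is hypothetical.
 */
#ifdef IOREMAP_USAGE_SKETCH
static u32 example_read_table_word(resource_size_t table_phys)
{
	void __iomem *p = ioremap_cache(table_phys, PAGE_SIZE);
	u32 val = 0;

	if (p) {
		val = readl(p);
		iounmap(p);
	}
	return val;
}
#endif
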
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

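/*
 * Editor's usage sketch (not part of the original file): how a /dev/mem
 * style reader would pair the two helpers above. The physical address
 * is hypothetical.
 */
#ifdef IOREMAP_USAGE_SKETCH
static int example_peek_byte(unsigned long phys, u8 *out)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*out = *(u8 *)ptr;
	unxlate_dev_mem_ptr(phys, ptr);	/* a no-op for RAM pages */
	return 0;
}
#endif
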
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
arch/x86/mm/ioremap.c (v4.6)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

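/*
 * Editor's note (not in the original file): walk_system_ram_range()
 * invokes its callback once per System RAM chunk intersecting the pfn
 * range, stops at the first non-zero return value and propagates it;
 * with no intersecting RAM it returns its initial -1, which is why the
 * caller below compares against exactly 1. A minimal sketch of that
 * contract, with hypothetical helper names:
 */
#ifdef IOREMAP_USAGE_SKETCH
static int example_any_ram(unsigned long start_pfn, unsigned long nr_pages,
			   void *arg)
{
	return 1;	/* non-zero: stop walking and report a hit */
}

static bool example_range_has_ram(unsigned long pfn, unsigned long nr)
{
	return walk_system_ram_range(pfn, nr, NULL, example_any_ram) == 1;
}
#endif
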
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

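/*
 * Editor's usage sketch (not part of the original file): ioremap_uc()
 * suits regions where even speculative write combining would be unsafe,
 * e.g. an MMIO range that firmware may have covered with a WC MTRR.
 * The mapped address is hypothetical.
 */
#ifdef IOREMAP_USAGE_SKETCH
static void __iomem *example_map_strict(resource_size_t phys)
{
	/* strong UC, rather than the UC- that ioremap_nocache() requests */
	return ioremap_uc(phys, PAGE_SIZE);
}
#endif
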
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

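/*
 * Editor's usage sketch (not part of the original file): write-through
 * can suit device memory that is read frequently but whose stores must
 * still reach the device promptly. Names and address are hypothetical.
 */
#ifdef IOREMAP_USAGE_SKETCH
static void __iomem *example_map_wt(resource_size_t dev_phys,
				    unsigned long len)
{
	return ioremap_wt(dev_phys, len);	/* reads may be served from cache */
}
#endif
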
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

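/*
 * Editor's note (not in the original file): these two hooks gate the
 * huge I/O mappings described above __ioremap_caller(). A sketch, with
 * a hypothetical 2MB-aligned physical address:
 */
#ifdef IOREMAP_USAGE_SKETCH
static void __iomem *example_map_huge(void)
{
	/* may be backed by a single pmd entry when pmd mappings are
	   supported and no 4KB-granular MTRR forces a smaller page */
	return ioremap_wc(0x100000000ULL, 2UL * 1024 * 1024);
}
#endif
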
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}