/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

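/*
 * Fill a single PTE table: map [phys_addr, phys_addr + size) at the given
 * virtual address one page at a time, clamped to the span of one PMD entry.
 */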
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

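/*
 * Populate the PMD entries covering [address, address + size), allocating
 * PTE tables as needed; the range is clamped to the span of one PGD entry.
 */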
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

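/*
 * Top level of the page-table walk: iterate over the PGD entries spanned by
 * the region, allocating intermediate tables, then flush the TLB at the end.
 */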
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

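/* True iff no bits above bit 28 are set, i.e. the low 512MB KSEG1 window. */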
#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
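
/*
 * Example (hypothetical caller, not part of this file): drivers normally
 * reach this through the ioremap()/ioremap_nocache() wrappers rather than
 * calling __ioremap() directly; DEV_PHYS_BASE below stands in for a
 * board-specific physical address:
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, 0x1000);
 *	if (regs) {
 *		u32 id = readl(regs);
 *		...
 *		iounmap(regs);
 *	}
 */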

/* True iff the virtual address came from KSEG1 rather than vmalloc space. */
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

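/*
 * Undo a mapping created by __ioremap(). KSEG1 addresses were never entered
 * into the page tables, so only vmalloc-space mappings have a vm_struct to
 * remove and free.
 */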
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

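/*
 * Callback for walk_system_ram_range(): returns 1 if any page in the range
 * is valid RAM not marked reserved, i.e. memory it would be unsafe to remap.
 */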
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: protection bits; the cache coherency attribute (CCA) bits
 *            select how the mapping is cached
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
		unsigned long prot_val)
{
	unsigned long flags = prot_val & _CACHE_MASK;
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	unsigned long vaddr;
	void __iomem *cpu_addr;

	cpu_addr = plat_ioremap(phys_addr, size, flags);
	if (cpu_addr)
		return cpu_addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/* Early remaps should use the unmapped regions until the VM is available */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       __pgprot(flags))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);
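
/*
 * Example (hypothetical caller, not part of this file): the plain ioremap()
 * wrapper funnels into ioremap_prot() with _CACHE_UNCACHED, so an explicitly
 * uncached mapping of a hypothetical DEV_PHYS_BASE looks like:
 *
 *	void __iomem *regs = ioremap_prot(DEV_PHYS_BASE, 0x1000,
 *					  _CACHE_UNCACHED);
 *	if (regs) {
 *		u32 id = readl(regs);
 *		...
 *		iounmap(regs);
 *	}
 */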

/*
 * Undo an ioremap_prot() mapping: give the platform's plat_iounmap() hook
 * first refusal, leave KSEG1 addresses alone, and vunmap() anything that
 * was mapped through the page tables.
 */
void iounmap(const volatile void __iomem *addr)
{
	if (!plat_iounmap(addr) && !IS_KSEG1(addr))
		vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);