// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/kmap.c
 *
 * Copyright (C) 1997 Roman Hodek
 *
 * 10/01/99 cleaned up the code and changed to the same interface
 *          used by other architectures            /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE     (256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
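
/*
 * Note: an "early termination" page descriptor on the 020/030 MMU sits in
 * a pointer table and maps a whole 256KiB (PTRTREESIZE) chunk with a
 * single entry, which is why this file manages its own KMAP_START..KMAP_END
 * window for those CPUs instead of using vmalloc space.
 */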

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE         PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
        return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
        vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE         (256*1024)

static struct vm_struct *iolist;

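/*
 * Hand out a chunk of the KMAP_START..KMAP_END window.  The iolist is kept
 * sorted by address and scanned first-fit; every area is padded with an
 * extra IO_SIZE guard gap, which is recorded in area->size and stripped
 * again in free_io_area().
 */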
static struct vm_struct *get_io_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = KMAP_START;
        for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long)tmp->addr)
                        break;
                if (addr > KMAP_END-size) {
                        kfree(area);
                        return NULL;
                }
                addr = tmp->size + (unsigned long)tmp->addr;
        }
        area->addr = (void *)addr;
        area->size = size + IO_SIZE;
        area->next = *p;
        *p = area;
        return area;
}

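/*
 * Release a mapping obtained from get_io_area().  The address is rounded
 * down to an IO_SIZE boundary to recover the area base before searching
 * the list.
 */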
static inline void free_io_area(void *addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        addr = (void *)((unsigned long)addr & -IO_SIZE);
        for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        /* remove gap added in get_io_area() */
                        __iounmap(tmp->addr, tmp->size - IO_SIZE);
                        kfree(tmp);
                        return;
                }
        }
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

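/*
 * physaddr need not be aligned; the returned cookie carries the same byte
 * offset within the new mapping.  cacheflag selects one of the IOMAP_*
 * cache modes.
 */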
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
        struct vm_struct *area;
        unsigned long virtaddr, retaddr;
        long offset;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        /*
         * Don't allow mappings that wrap: physaddr + size must not
         * overflow past the top of the address space.
         */
        if (!size || physaddr > (unsigned long)(-size))
                return NULL;

#ifdef CONFIG_AMIGA
        if (MACH_IS_AMIGA) {
                /* this range is permanently mapped 1:1 (non-cached,
                   serialized), so the physical address can be used
                   directly */
                if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
                    && (cacheflag == IOMAP_NOCACHE_SER))
                        return (void __iomem *)physaddr;
        }
#endif
#ifdef CONFIG_COLDFIRE
        /* ColdFire on-chip peripherals are always accessible */
        if (__cf_internalio(physaddr))
                return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
        printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
        /*
         * Mappings have to be IO_SIZE-aligned: round physaddr down,
         * remember the offset, and round size up to cover the whole
         * requested range.
         */
        offset = physaddr & (IO_SIZE - 1);
        physaddr &= -IO_SIZE;
        size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

        /*
         * Ok, go for it..
         */
        area = get_io_area(size);
        if (!area)
                return NULL;

        virtaddr = (unsigned long)area->addr;
        retaddr = virtaddr + offset;
#ifdef DEBUG
        printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

        /*
         * add cache and table flags to physical address
         */
        if (CPU_IS_040_OR_060) {
                physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
                             _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_FULL_CACHING:
                        physaddr |= _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        physaddr |= _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        physaddr |= _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        physaddr |= _PAGE_CACHE040W;
                        break;
                }
        } else {
                physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
                             _PAGE_DIRTY | _PAGE_READWRITE);
                switch (cacheflag) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        /* 020/030 only has a cache-inhibit bit, so both
                           nocache modes collapse to _PAGE_NOCACHE030 */
                        physaddr |= _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        break;
                }
        }

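        /*
         * Fill in the page tables: on 020/030 one early-termination
         * descriptor maps PTRTREESIZE (256KiB) per iteration, on 040/060
         * individual PTEs are written in PAGE_SIZE steps.
         */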
        while ((long)size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PTRTREESIZE-1)))
                        printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
                if (!pmd_dir) {
                        printk("ioremap: no mem for pmd_dir\n");
                        return NULL;
                }

                if (CPU_IS_020_OR_030) {
                        pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
                        physaddr += PTRTREESIZE;
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
                        pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                return NULL;
                        }

                        pte_val(*pte_dir) = physaddr;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
#ifdef DEBUG
        printk("\n");
#endif
        flush_tlb_all();

        return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again.  Addresses that were handed out
 * without creating a mapping (the Amiga 1:1 range, ColdFire internal
 * I/O) are simply ignored.
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
        if ((!MACH_IS_AMIGA) ||
            (((unsigned long)addr < 0x40000000) ||
             ((unsigned long)addr > 0x60000000)))
                        free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
        if (cf_internalio(addr))
                return;
#endif
        free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);
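
/*
 * Typical driver-side use (a sketch; board_phys is a made-up example
 * address, not something defined here):
 *
 *      void __iomem *regs = __ioremap(board_phys, 0x1000, IOMAP_NOCACHE_SER);
 *      if (regs) {
 *              ... access the device through regs ...
 *              iounmap(regs);
 *      }
 */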

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but this
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

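                /*
                 * On 020/030 a pmd slot may hold an early-termination
                 * descriptor that maps a whole 256KiB chunk; clear it in
                 * one step instead of walking PTEs.
                 */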
                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;
                        int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

                        if (pmd_type == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = 0;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        } else if (pmd_type == 0) {
                                /* already clear: advance past the chunk,
                                   a bare continue would loop forever */
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = 0;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
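/*
 * cmode comes in as one of the IOMAP_* values and is translated below to
 * the matching page descriptor bits; granularity is a page, or a 256KiB
 * chunk for 020/030 early-termination descriptors.
 */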
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        if (CPU_IS_040_OR_060) {
                switch (cmode) {
                case IOMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else {
                switch (cmode) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        cmode = _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        cmode = 0;
                }
        }

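        /*
         * Walk the range and rewrite the cache bits in place: whole
         * early-termination descriptors on 020/030, individual PTEs
         * otherwise.
         */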
        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;

                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
                                                         _CACHEMASK040) | cmode;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);