// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/kmap.c
 *
 * Copyright (C) 1997 Roman Hodek
 *
 * 10/01/99 cleaned up the code and changed to the same interface
 *	    used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
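
/*
 * Background, for readers new to the 020/030 MMU: an "early termination"
 * page descriptor is a pointer-table entry whose descriptor type marks it
 * as a page descriptor, so the table walk stops there and that one entry
 * maps the whole range a lower-level table would have covered (PMD_SIZE
 * here).  That is why the 020/030 paths below create and tear down
 * mappings in PMD_SIZE steps, while 040/060 use ordinary PAGE_SIZE ptes.
 */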

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful
 * Currently it doesn't free pointer/page tables anymore but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				/* early termination descriptor: one entry
				 * covers PMD_SIZE, so skip the pte walk */
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

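/*
 * Carve a slot out of the KMAP_START..KMAP_END virtual range.  The iolist
 * is kept sorted by address and scanned first-fit; each area is oversized
 * by one IO_SIZE guard gap (compare the guard page that vmalloc() adds),
 * which free_io_area() subtracts again before handing the range to
 * __free_io_area().
 */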
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */
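
/*
 * Typical use, as a minimal sketch (BOARD_MMIO_BASE and CTRL_OFFSET are
 * made-up names for illustration):
 *
 *	void __iomem *regs = ioremap(BOARD_MMIO_BASE, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_OFFSET);
 *	iounmap(regs);
 */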

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
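
	/*
	 * Worked example (assuming IO_SIZE == PAGE_SIZE == 4096): with
	 * physaddr = 0x12345678 and size = 0x100, offset becomes 0x678,
	 * physaddr is rounded down to 0x12345000 and size is rounded up
	 * to 0x1000, so the whole page gets mapped and the caller is
	 * handed back virtaddr + 0x678.
	 */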

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}
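
	/*
	 * Build the mapping: on 020/030 (three-level configurations) each
	 * step installs one early termination descriptor covering PMD_SIZE,
	 * otherwise ordinary ptes are installed one PAGE_SIZE at a time.
	 */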
	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again
 */
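/*
 * On Amiga, physical addresses in [0x40000000, 0x60000000) may have been
 * returned by __ioremap() as-is, without creating a mapping, so there is
 * nothing to tear down for them; everything else goes through
 * free_io_area().  Likewise, ColdFire internal peripheral addresses were
 * never mapped in the first place.
 */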
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
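
/*
 * Example call, a minimal sketch (addr and size are illustrative): switch
 * one page of an existing kernel mapping to serialized non-cached mode,
 * after the caller has pushed any dirty cache lines for that range:
 *
 *	kernel_set_cachemode(addr, PAGE_SIZE, IOMAP_NOCACHE_SER);
 */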
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);