// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/kmap.c
 *
 * Copyright (C) 1997 Roman Hodek
 *
 * 10/01/99 cleaned up the code and changed to the same interface
 *	    used by other architectures /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
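
/*
 * Concretely (summarizing the two paths below): on 040/060, IO_SIZE is
 * PAGE_SIZE and allocation is delegated to the generic vmalloc area,
 * while on 020/030, IO_SIZE is PMD_SIZE and a private first-fit
 * allocator (iolist) hands out PMD-aligned chunks between KMAP_START
 * and KMAP_END.
 */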

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
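
/*
 * On 040/060, free_io_area() simply defers to vmalloc's own bookkeeping;
 * masking with PAGE_MASK strips the in-page offset that __ioremap()
 * added to the address it returned.
 */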

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables, but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				/*
				 * Early termination descriptor: clear it
				 * and advance to the next PMD-sized chunk;
				 * without the continue, the cleared entry
				 * would trip the pmd_bad() check below.
				 */
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			} else if (pmd_type == 0) {
				/*
				 * Nothing mapped here; skip ahead so the
				 * loop cannot spin on the same address.
				 */
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END - size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}
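
/*
 * Illustration (not code): iolist is kept sorted by address, so the
 * scan above is a first-fit search, with an IO_SIZE guard gap reserved
 * after each area.  With two areas A and B already allocated:
 *
 *	KMAP_START                                    KMAP_END
 *	|--[A + gap]--free--[B + gap]-------free----------|
 *
 * a new request lands in the first free stretch that can hold
 * size + IO_SIZE, and fails once addr would pass KMAP_END - size.
 */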

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT) {
		if (physaddr >= 0xff000000 && cacheflag == IOMAP_NOCACHE_SER)
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *)physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
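	/*
	 * Worked example (illustrative values): with IO_SIZE == 0x1000,
	 * physaddr == 0x40001234 and size == 0x100 give offset == 0x234,
	 * physaddr rounded down to 0x40001000, and size rounded up to
	 * 0x1000 (0x100 + 0x234 = 0x334, padded to the next IO_SIZE).
	 */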

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}
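
	/*
	 * physaddr now carries the descriptor and cache bits as well, so
	 * each iteration below can write it directly into an early
	 * termination pmd entry (020/030) or into a pte (all other CPUs).
	 */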

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE - 1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
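
/*
 * Usage sketch (illustrative only, not taken from this file): a driver
 * maps a device window, uses the normal MMIO accessors on it, and
 * unmaps it again.  The base address and register offsets here are
 * made-up placeholders; real callers get them from their platform:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = __ioremap(0x80000000, 0x1000, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 */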

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA &&
	    ((unsigned long)addr >= 0x40000000) &&
	    ((unsigned long)addr < 0x60000000))
		return;
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT && (unsigned long)addr >= 0xff000000)
		return;
#endif
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
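
/*
 * Usage sketch (illustrative only): after pushing any cached data for
 * the range out of the caches, a caller could switch an already-mapped
 * region to writethrough mode.  fb_base and fb_size are hypothetical
 * placeholders, not names used elsewhere in the kernel:
 *
 *	kernel_set_cachemode(fb_base, fb_size, IOMAP_WRITETHROUGH);
 */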