v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

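/*
 * Illustration (editorial, not part of the kernel source): on 020/030 an
 * early termination descriptor sits at the pointer-table (pmd) level and
 * maps a whole PMD_SIZE region (256 KiB here) with a single entry,
 * roughly:
 *
 *	pmd_val(*pmd_dir) = physaddr | type/cache bits;
 *
 * whereas a normal mapping needs a pte table hanging below the pmd.  The
 * two layouts cannot share one arena, hence the private allocator below.
 */
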
#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;

			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

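/*
 * Editorial note (not part of the kernel source): the loop above tears
 * down one early-termination entry (PMD_SIZE) at a time on 020/030 and
 * one pte (PAGE_SIZE) at a time otherwise, flushing the TLB once at the
 * end.  The pmd_type == 0 arm retries the same virtaddr without
 * advancing, so it appears to rely on never meeting an empty descriptor
 * inside an area handed out by get_io_area().
 */
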
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

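/*
 * Editorial sketch (not part of the kernel source): iolist is kept
 * sorted by address and get_io_area() does a first-fit scan of the gaps
 * between KMAP_START and KMAP_END.  Each area is recorded IO_SIZE larger
 * than requested, leaving a guard gap between neighbours, and
 * free_io_area() subtracts that gap again before unmapping.  E.g., with
 * hypothetical addresses, one existing area [0xd0000000, +0x80000) and a
 * request for 0x40000 bytes: the scan starts at KMAP_START = 0xd0000000,
 * steps past the existing area, and returns addr = 0xd0080000 with
 * area->size = 0x80000 (0x40000 requested + 0x40000 guard).
 */
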
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT) {
		if (physaddr >= 0xff000000 && cacheflag == IOMAP_NOCACHE_SER)
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

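/*
 * Worked example of the alignment above (editorial, values invented):
 * on 020/030, IO_SIZE is PMD_SIZE = 0x40000 (256 KiB), so a request of
 * physaddr = 0x40012345, size = 0x1000 gives
 *
 *	offset   = 0x40012345 & 0x3ffff                    = 0x12345
 *	physaddr = 0x40012345 & ~0x3ffff                   = 0x40000000
 *	size     = (0x1000 + 0x12345 + 0x3ffff) & ~0x3ffff = 0x40000
 *
 * i.e. one whole IO_SIZE chunk is mapped and the caller gets back the
 * start of that chunk plus the original offset.
 */
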
	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
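
/*
 * Usage sketch (editorial, hypothetical driver code; the register base
 * and length are invented):
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x00de0000, 0x1000, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 *
 * Most callers are expected to go through the plain ioremap() wrapper,
 * which on m68k resolves to __ioremap(..., IOMAP_NOCACHE_SER).
 */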

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA &&
	    ((unsigned long)addr >= 0x40000000) &&
	    ((unsigned long)addr < 0x60000000))
		return;
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT && (unsigned long)addr >= 0xff000000)
		return;
#endif
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
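
/*
 * Usage sketch (editorial, hypothetical values): switching an existing
 * kernel mapping to writethrough caching.  Per the comment above the
 * function, the caller must first push any dirty cache lines for the
 * range, e.g. with the m68k cache_push() helper (assumed suitable here):
 *
 *	cache_push(virt_to_phys(vaddr), len);
 *	kernel_set_cachemode(vaddr, len, IOMAP_WRITETHROUGH);
 */
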
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

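/*
 * Editorial note (not part of the kernel source): PTRTREESIZE is the
 * span covered by one 020/030 pointer-table entry.  In this version a
 * pmd_t bundles 16 such entries (pmd_dir->pmd[0..15]), which is why the
 * early-termination code below indexes with (virtaddr/PTRTREESIZE) & 15.
 */
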
/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

static struct vm_struct *iolist;

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			__iounmap(tmp->addr, tmp->size);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
			free_io_area((__force void *)addr);
#else
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

242
243/*
244 * __iounmap unmaps nearly everything, so be careful
245 * Currently it doesn't free pointer/page tables anymore but this
246 * wasn't used anyway and might be added later.
247 */
248void __iounmap(void *addr, unsigned long size)
249{
250	unsigned long virtaddr = (unsigned long)addr;
251	pgd_t *pgd_dir;
252	pmd_t *pmd_dir;
253	pte_t *pte_dir;
254
255	while ((long)size > 0) {
256		pgd_dir = pgd_offset_k(virtaddr);
257		if (pgd_bad(*pgd_dir)) {
258			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
259			pgd_clear(pgd_dir);
260			return;
261		}
262		pmd_dir = pmd_offset(pgd_dir, virtaddr);
263
264		if (CPU_IS_020_OR_030) {
265			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
266			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
267
268			if (pmd_type == _PAGE_PRESENT) {
269				pmd_dir->pmd[pmd_off] = 0;
270				virtaddr += PTRTREESIZE;
271				size -= PTRTREESIZE;
272				continue;
273			} else if (pmd_type == 0)
274				continue;
275		}
276
277		if (pmd_bad(*pmd_dir)) {
278			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
279			pmd_clear(pmd_dir);
280			return;
281		}
282		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
283
284		pte_val(*pte_dir) = 0;
285		virtaddr += PAGE_SIZE;
286		size -= PAGE_SIZE;
287	}
288
289	flush_tlb_all();
290}
291
/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);