v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>
#include <asm/machdep.h>


/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
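
/*
 * Sizing sketch, assuming the classic m68k layout of 128 four-byte
 * descriptors per pointer table: PTABLE_SIZE works out to 512 bytes, so a
 * 4096-byte page holds 4096 / 512 = 8 tables -- exactly the 8 slots tracked
 * by the one-byte PD_MARKBITS bitmap (0xff means all 8 slots are free).
 */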

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}
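
/*
 * Note (hedged): init_pointer_table() is invoked during boot to donate
 * pointer tables that were already built by the early kernel page-table
 * setup, so they end up managed by the same bitmap allocator as tables
 * handed out later through get_pointer_table().
 */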

pmd_t *get_pointer_table(void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS(dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		page = (void *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	/* find the first set bit, i.e. the first free table slot in the page */
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list);
	}
	return (pmd_t *)(page_address(PD_PAGE(dp)) + off);
}

int free_pointer_table(pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page((void *)page);
		free_page(page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list);
	}
	return 0;
}
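
/*
 * Usage sketch, under the assumption (as on the Motorola MMU variants) that
 * pmd_alloc_one()/pmd_free() map onto these two helpers:
 *
 *	pmd_t *pmd = get_pointer_table();
 *	if (!pmd)
 *		return NULL;
 *	...
 *	free_pointer_table(pmd);  (returns 1 if the backing page was freed)
 */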

/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* invalidate page in i-cache */
static inline void cleari040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%ic,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* push page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
static inline void push040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cpushp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}

/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);
	if (CPU_IS_060)
		clear040(paddr);
	local_irq_restore(flags);
}

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they do not fall
 * exactly on a page boundary; otherwise too much data may be invalidated and
 * thus lost forever. CPUSHP does what we need: it invalidates the page only
 * after pushing dirty data back to memory. (Thanks to Jes for discovering
 * the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would temporarily changing it cause problems?).
 * So we have to push first and then invalidate in a separate step.
 */


/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear(unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		clear_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		tmp = -paddr & (PAGE_SIZE - 1);
		if (tmp) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	} else {
		/* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
	}
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(0);
#endif
}
EXPORT_SYMBOL(cache_clear);
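
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * is about to receive a device-to-memory DMA transfer into buf can discard
 * the now-stale cache lines first; note the range is a _physical_ address:
 *
 *	cache_clear(virt_to_phys(buf), size);
 *	(then start the DMA transfer from the device)
 */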


/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * also invalidate those entries in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push(unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not
		 * on the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			push040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	} else {
		/*
		 * 68030/68020 have no writeback cache. On the other hand,
		 * cache_push is actually a superset of cache_clear (the lines
		 * get written back and invalidated), so we should make sure
		 * to perform the corresponding actions. After all, this is
		 * getting called in places where we've just loaded code, or
		 * whatever, so flushing the icache is appropriate; flushing
		 * the dcache shouldn't be required.
		 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
	}
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(1);
#endif
}
EXPORT_SYMBOL(cache_push);
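
/*
 * Usage sketch (illustrative, not part of the original file): after storing
 * instructions into memory, e.g. freshly loaded or generated code, write the
 * dirty data lines back and invalidate stale icache lines for the range:
 *
 *	memcpy(dst, code, size);
 *	cache_push(virt_to_phys(dst), size);
 */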