// SPDX-License-Identifier: GPL-2.0
/*
** Tablewalk MMU emulator
**
** by Toshiyasu Morita
**
** Started 1/16/98 @ 2:22 am
*/

#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/sun3mmu.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
#include <asm/dvma.h>


#undef DEBUG_MMU_EMU
#define DEBUG_PROM_MAPS

/*
** Defines
*/

#define CONTEXTS_NUM		8
#define SEGMAPS_PER_CONTEXT_NUM 2048
#define PAGES_PER_SEGMENT	16
#define PMEGS_NUM		256
#define PMEG_MASK		0xFF
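
/*
** Geometry sanity check (assuming the usual sun3 8 KB page size):
** one PMEG maps PAGES_PER_SEGMENT pages = 16 * 8 KB = 128 KB, and the
** SEGMAPS_PER_CONTEXT_NUM segments of a context cover 2048 * 128 KB =
** 256 MB, exactly the 0x10000000 bound walked in mmu_emu_init().
** With only PMEGS_NUM (256) PMEGs, at most 32 MB can be mapped at once.
*/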

/*
** Globals
*/

unsigned long m68k_vmalloc_end;
EXPORT_SYMBOL(m68k_vmalloc_end);

unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
unsigned char pmeg_ctx[PMEGS_NUM];
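
/* Bookkeeping for the 256 hardware PMEGs (encoding inferred from the
   users below): pmeg_alloc[i] is 0 when PMEG i is free, 1 when it is
   loaned to a user context (reclaimable by clear_context()), and 2
   when it is permanently reserved for the kernel or PROM; pmeg_ctx
   and pmeg_vaddr record the owning context and mapped address. */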

/* pointers to the mm structs for each task in each
   context. 0xffffffff is a marker for kernel context */
static struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {
    [0] = (struct mm_struct *)0xffffffff
};

/* number of user contexts still free; bumped back up when a dead
   context's mm is mmdrop'd and cleared in clear_context() */
static unsigned char ctx_avail = CONTEXTS_NUM-1;

/* array of pages to be marked off for the rom when we do mem_init later */
/* 256 pages lets the rom take up to 2mb of physical ram..  I really
   hope it never wants more than that. */
unsigned long rom_pages[256];

/* Print a PTE value in symbolic form. For debugging. */
void print_pte (pte_t pte)
{
#if 0
	/* Verbose version. */
	unsigned long val = pte_val (pte);
	pr_cont(" pte=%lx [addr=%lx",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
	if (val & SUN3_PAGE_VALID)	pr_cont(" valid");
	if (val & SUN3_PAGE_WRITEABLE)	pr_cont(" write");
	if (val & SUN3_PAGE_SYSTEM)	pr_cont(" sys");
	if (val & SUN3_PAGE_NOCACHE)	pr_cont(" nocache");
	if (val & SUN3_PAGE_ACCESSED)	pr_cont(" accessed");
	if (val & SUN3_PAGE_MODIFIED)	pr_cont(" modified");
	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: pr_cont(" memory"); break;
		case SUN3_PAGE_TYPE_IO:     pr_cont(" io");     break;
		case SUN3_PAGE_TYPE_VME16:  pr_cont(" vme16");  break;
		case SUN3_PAGE_TYPE_VME32:  pr_cont(" vme32");  break;
	}
	pr_cont("]\n");
#else
	/* Terse version. More likely to fit on a line. */
	unsigned long val = pte_val (pte);
	char flags[7], *type;

	flags[0] = (val & SUN3_PAGE_VALID)     ? 'v' : '-';
	flags[1] = (val & SUN3_PAGE_WRITEABLE) ? 'w' : '-';
	flags[2] = (val & SUN3_PAGE_SYSTEM)    ? 's' : '-';
	flags[3] = (val & SUN3_PAGE_NOCACHE)   ? 'x' : '-';
	flags[4] = (val & SUN3_PAGE_ACCESSED)  ? 'a' : '-';
	flags[5] = (val & SUN3_PAGE_MODIFIED)  ? 'm' : '-';
	flags[6] = '\0';

	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: type = "memory"; break;
		case SUN3_PAGE_TYPE_IO:     type = "io"    ; break;
		case SUN3_PAGE_TYPE_VME16:  type = "vme16" ; break;
		case SUN3_PAGE_TYPE_VME32:  type = "vme32" ; break;
		default: type = "unknown?"; break;
	}

	pr_cont(" pte=%08lx [%07lx %s %s]\n",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
#endif
}

/* Print the PTE value for a given virtual address. For debugging. */
void print_pte_vaddr (unsigned long vaddr)
{
	pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
	print_pte (__pte (sun3_get_pte (vaddr)));
}

/*
 * Initialise the MMU emulator.
 */
void __init mmu_emu_init(unsigned long bootmem_end)
{
	unsigned long seg, num;
	int i,j;

	memset(rom_pages, 0, sizeof(rom_pages));
	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));

	/* pmeg align the end of bootmem, adding another pmeg;
	 * later bootmem allocations will likely need it */
	bootmem_end = (bootmem_end + (2 * SUN3_PMEG_SIZE)) & ~SUN3_PMEG_MASK;
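	/* e.g. assuming SUN3_PMEG_SIZE = 0x20000 (16 pages of 8 KB):
	 * bootmem_end = 0x123456 becomes (0x123456 + 0x40000) & ~0x1FFFF
	 * = 0x160000, the next pmeg boundary plus one full spare pmeg. */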

	/* mark all of the pmegs used thus far as reserved */
	for (i=0; i < __pa(bootmem_end) / SUN3_PMEG_SIZE ; ++i)
		pmeg_alloc[i] = 2;


	/* I'm thinking that most of the top pmegs are going to be
	   used for something, and we probably shouldn't risk it */
	for(num = 0xf0; num <= 0xff; num++)
		pmeg_alloc[num] = 2;

	/* liberate all existing mappings in the rest of kernel space */
	for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);

		if(!pmeg_alloc[i]) {
#ifdef DEBUG_MMU_EMU
			pr_info("freed:");
			print_pte_vaddr (seg);
#endif
			sun3_put_segmap(seg, SUN3_INVALID_PMEG);
		}
	}

	j = 0;
	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
#ifdef DEBUG_PROM_MAPS
			for(i = 0; i < 16; i++) {
				pr_info("mapped:");
				print_pte_vaddr (seg + (i*PAGE_SIZE));
				break;
			}
#endif
			// the lowest mapping here is the end of our
			// vmalloc region
			if (!m68k_vmalloc_end)
				m68k_vmalloc_end = seg;

			// mark the segmap alloc'd, and reserve any
			// of the first 0xbff pages the hardware is
			// already using...  does any sun3 support > 24mb?
			pmeg_alloc[sun3_get_segmap(seg)] = 2;
		}
	}

	dvma_init();


	/* blank everything below the kernel, and we've got the base
	   mapping to start all the contexts off with... */
	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
		sun3_put_segmap(seg, SUN3_INVALID_PMEG);

	set_fc(3);
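	/* Presumably function code 3 selects the sun3 MMU control space
	 * for the segmap accessors while the PROM's pv_setctxt call
	 * copies context 0's mappings into contexts 1..7; USER_DATA is
	 * restored below. */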
	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);
		for(j = 1; j < CONTEXTS_NUM; j++)
			(*(romvec->pv_setctxt))(j, (void *)seg, i);
	}
	set_fc(USER_DATA);
}

/* erase the mappings for a dead context.  Uses the pg_dir for hints
   as the pmeg tables proved somewhat unreliable, and unmapping all of
   TASK_SIZE was much slower and no more stable. */
/* todo: find a better way to keep track of the pmegs used by a
   context for when they're cleared */
void clear_context(unsigned long context)
{
     unsigned char oldctx;
     unsigned long i;

     if(context) {
	     if(!ctx_alloc[context])
		     panic("%s: context not allocated\n", __func__);

	     ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
	     ctx_alloc[context] = (struct mm_struct *)0;
	     ctx_avail++;
     }

     oldctx = sun3_get_context();

     sun3_put_context(context);

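     /* Segmap accesses operate on the current context, so the MMU is
      * switched to the dying context while its mappings are torn down;
      * oldctx is restored once the sweep below finishes. */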
     for(i = 0; i < SUN3_INVALID_PMEG; i++) {
	     if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) {
		     sun3_put_segmap(pmeg_vaddr[i], SUN3_INVALID_PMEG);
		     pmeg_ctx[i] = 0;
		     pmeg_alloc[i] = 0;
		     pmeg_vaddr[i] = 0;
	     }
     }

     sun3_put_context(oldctx);
}

/* gets an empty context.  if full, kills the next context listed to
   die first */
/* This context invalidation scheme is, well, totally arbitrary, I'm
   sure it could be much more intelligent...  but it gets the job done
   for now without much overhead in making its decision. */
/* todo: come up with optimized scheme for flushing contexts */
unsigned long get_free_context(struct mm_struct *mm)
{
	unsigned long new = 1;
	static unsigned char next_to_die = 1;

	if(!ctx_avail) {
		/* kill someone to get our context */
		new = next_to_die;
		clear_context(new);
		next_to_die = (next_to_die + 1) & 0x7;
		if(!next_to_die)
			next_to_die++;
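		/* The mask above advances the victim pointer round-robin
		 * modulo CONTEXTS_NUM (8); bumping it past zero keeps the
		 * kernel's context from ever being chosen. */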
	} else {
		while(new < CONTEXTS_NUM) {
			if(ctx_alloc[new])
				new++;
			else
				break;
		}
		// check to make sure one was really free...
		if(new == CONTEXTS_NUM)
			panic("%s: failed to find free context", __func__);
	}

	ctx_alloc[new] = mm;
	ctx_avail--;

	return new;
}

/*
 * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
 * `context'. Maintain internal PMEG management structures. This doesn't
 * actually map the physical address, but does clear the old mappings.
 */
//todo: better allocation scheme? but is extra complexity worthwhile?
//todo: only clear old entries if necessary? how to tell?

inline void mmu_emu_map_pmeg (int context, int vaddr)
{
	static unsigned char curr_pmeg = 128;
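	/* curr_pmeg is an unsigned char, so the increments wrap modulo
	 * PMEGS_NUM (256); entries pinned at boot by mmu_emu_init()
	 * (pmeg_alloc == 2) are skipped by the scan below. */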
	int i;

	/* Round address to PMEG boundary. */
	vaddr &= ~SUN3_PMEG_MASK;

	/* Find a spare one. */
	while (pmeg_alloc[curr_pmeg] == 2)
		++curr_pmeg;


#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
		curr_pmeg, context, vaddr);
#endif

	/* Invalidate old mapping for the pmeg, if any */
	if (pmeg_alloc[curr_pmeg] == 1) {
		sun3_put_context(pmeg_ctx[curr_pmeg]);
		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
		sun3_put_context(context);
	}

	/* Update PMEG management structures. */
	// don't take pmegs away from the kernel...
	if(vaddr >= PAGE_OFFSET) {
		/* map kernel pmegs into all contexts */
		unsigned char i;

		for(i = 0; i < CONTEXTS_NUM; i++) {
			sun3_put_context(i);
			sun3_put_segmap (vaddr, curr_pmeg);
		}
		sun3_put_context(context);
		pmeg_alloc[curr_pmeg] = 2;
		pmeg_ctx[curr_pmeg] = 0;

	}
	else {
		pmeg_alloc[curr_pmeg] = 1;
		pmeg_ctx[curr_pmeg] = context;
		sun3_put_segmap (vaddr, curr_pmeg);

	}
	pmeg_vaddr[curr_pmeg] = vaddr;

	/* Set hardware mapping and clear the old PTE entries. */
	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);

	/* Consider a different one next time. */
	++curr_pmeg;
}

/*
 * Handle a pagefault at virtual address `vaddr'; check if there should be a
 * page there (specifically, whether the software pagetables indicate that
 * there is). This is necessary due to the limited size of the second-level
 * Sun3 hardware pagetables (256 groups of 16 pages). If there should be a
 * mapping present, we select a `spare' PMEG and use it to create a mapping.
 * `read_flag' is nonzero for a read fault; zero for a write. Returns nonzero
 * if we successfully handled the fault.
 */
//todo: should we bump minor pagefault counter? if so, here or in caller?
//todo: possibly inline this into bus_error030 in <asm/buserror.h> ?

// kernel_fault is set when a kernel page couldn't be demand mapped,
// and forces another try using the kernel page table.  basically a
// hack so that vmalloc would work correctly.

int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
{
	unsigned long segment, offset;
	unsigned char context;
	pte_t *pte;
	pgd_t * crp;

	if(current->mm == NULL) {
		crp = swapper_pg_dir;
		context = 0;
	} else {
		context = current->mm->context;
		if(kernel_fault)
			crp = swapper_pg_dir;
		else
			crp = current->mm->pgd;
	}

#ifdef DEBUG_MMU_EMU
	pr_info("%s: vaddr=%lx type=%s crp=%p\n", __func__, vaddr,
		read_flag ? "read" : "write", crp);
#endif

	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
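	/* vaddr decomposition, assuming 8 KB pages (PAGE_SHIFT == 13,
	 * so SUN3_PTE_SIZE_BITS == 13 and SUN3_PMEG_SIZE_BITS == 17):
	 * bits 27..17 pick one of the 2048 segments (mask 0x7FF) and
	 * bits 16..13 one of its 16 pages (mask 0xF). */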

#ifdef DEBUG_MMU_EMU
	pr_info("%s: segment=%lx offset=%lx\n", __func__, segment, offset);
#endif

	pte = (pte_t *) pgd_val (*(crp + segment));

//todo: next line should check for valid pmd properly.
	if (!pte) {
//                pr_info("mmu_emu_handle_fault: invalid pmd\n");
                return 0;
        }

	pte = (pte_t *) __va ((unsigned long)(pte + offset));

	/* Make sure this is a valid page */
	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
		return 0;

	/* Make sure there's a pmeg allocated for the page */
	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg (context, vaddr);

	/* Write the pte value to hardware MMU */
	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));

	/* Update software copy of the pte value */
// I'm not sure this is necessary. If this is required, we ought to simply
// copy this out when we reuse the PMEG or at some other convenient time.
// Doing it here is fairly meaningless, anyway, as we only know about the
// first access to a given page. --m
	if (!read_flag) {
		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
					   | SUN3_PAGE_MODIFIED);
		else
			return 0;	/* Write-protect error. */
	} else
		pte_val (*pte) |= SUN3_PAGE_ACCESSED;

#ifdef DEBUG_MMU_EMU
	pr_info("seg:%ld crp:%p ->", get_fs().seg, crp);
	print_pte_vaddr (vaddr);
	pr_cont("\n");
#endif

	return 1;
}