v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cacheable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * The Motorola 680x0 user's manual recommends using uncached memory for
 * address translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}
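
/*
 * A minimal usage sketch (not part of the original file): how a caller
 * is expected to pair these helpers around the lifetime of a page used
 * as a translation table.  The *_example helper names are hypothetical;
 * the pattern mirrors get_pointer_table()/free_pointer_table() below.
 */
static void *table_page_alloc_example(void)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	if (!page)
		return NULL;
	mmu_page_ctor(page);	/* flush, then uncache while the MMU owns it */
	return page;
}

static void table_page_free_example(void *page)
{
	mmu_page_dtor(page);	/* restore normal caching */
	free_page((unsigned long)page);
}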

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
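
/*
 * Worked numbers for the two macros above, assuming PAGE_SIZE == 4096:
 *   PGD/PMD: ptable_size = 1 << 9 = 512 bytes, so 4096/512 = 8 tables
 *            per page and ptable_mask = 0xff;
 *   PTE:     ptable_size = 1 << 8 = 256 bytes, so 4096/256 = 16 tables
 *            per page and ptable_mask = 0xffff.
 * In PD_MARKBITS() a set bit means the corresponding slot is free.
 */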

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}
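
/*
 * Hedged sketch of a boot-time call site (the real registration lives
 * in arch/m68k/mm/init.c and walks all early tables): each table set
 * up by head.S is reported once so its page becomes visible to
 * get_pointer_table()/free_pointer_table().
 */
static void __init init_pointer_tables_example(void)
{
	/* hand the kernel root table's page to the allocator */
	init_pointer_table(kernel_pg_dir, TABLE_PGD);
}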

void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables (16 for the smaller pte
	 * tables).  The page is remapped in virtual address space to
	 * be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * doesn't support SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page(page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
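
/*
 * Hedged sketch of the pgalloc-level wrappers built on the two
 * functions above (the real versions live in
 * arch/m68k/include/asm/motorola_pgalloc.h); the *_example names are
 * hypothetical.
 */
static inline pmd_t *pmd_alloc_one_example(struct mm_struct *mm,
					   unsigned long address)
{
	return get_pointer_table(TABLE_PMD);
}

static inline void pmd_free_example(struct mm_struct *mm, pmd_t *pmd)
{
	free_pointer_table(pmd, TABLE_PMD);
}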

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}
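
/*
 * Worked example of the bump allocation above, assuming PTRS_PER_PTE
 * == 64 and 4-byte entries: each call hands out one 64 * 4 = 256 byte
 * pte table, so a fresh page serves 4096/256 = 16 calls before
 * last_pte_table reaches a page boundary and a new page is allocated.
 */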

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							   PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}
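
/*
 * Same arithmetic for the pointer tables, assuming PTRS_PER_PMD == 128
 * and 4-byte entries: each call advances last_pmd_table by 128 * 4 =
 * 512 bytes, i.e. 8 pointer tables are carved out of each page.
 */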

static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}
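
/*
 * Worked example of the loop above, assuming the classic sizes
 * PGDIR_SIZE == 32 MiB and PMD_SIZE == 256 KiB: a 64 MiB chunk on a
 * 68030 collapses into two early-terminating root entries, while on a
 * 68040 it takes 64 MiB / 256 KiB = 256 pte tables, each mapping 64
 * resident 4 KiB pages.
 */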

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S.
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}
v4.6

/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cacheable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}
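
/*
 * Worked example, assuming the classic v4.6 layout with PTRS_PER_PTE
 * == 1024: each call returns one full page of 1024 4-byte entries
 * mapping 4 MiB, which the 020/030 hardware consumes as 16 consecutive
 * 64-entry tables of 256 KiB each (see pmd_dir->pmd[...] in map_node()
 * below).
 */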

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}

static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
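/*
 * Worked numbers for these two early-termination spans: PTRTREESIZE =
 * 256 KiB is what one pointer-table entry maps (64 pages of 4 KiB);
 * ROOTTREESIZE = 32 MiB is what one root-table entry maps (128 such
 * pointer entries).
 */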
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}
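
	/*
	 * Note on the loop above (an assumption about the bootmem API,
	 * not stated in this file): init_bootmem_node() returns the
	 * size in bytes of the bootmem bitmap it places at availmem,
	 * so availmem advances past each node's bitmap before the next
	 * node is set up.
	 */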

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem,
			  min(m68k_init_mapped_size, size) - (availmem - addr));
	map_node(0);
	if (size > m68k_init_mapped_size)
		free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
				  size - m68k_init_mapped_size);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}